diff --git a/cmd/raidcli/main.go b/cmd/raidcli/main.go index 021c88fe092..bf6b0181bcb 100644 --- a/cmd/raidcli/main.go +++ b/cmd/raidcli/main.go @@ -32,7 +32,7 @@ type BaseOptions struct { Host string `help:"SSH Host IP" default:"$RAID_HOST" metavar:"RAID_HOST"` Username string `help:"Username, usually root" default:"$RAID_USERNAME" metavar:"RAID_USERNAME"` Password string `help:"Password" default:"$RAID_PASSWORD" metavar:"RAID_PASSWORD"` - Driver string `help:"Raid dirver" default:"$RAID_DRIVER" metavar:"RAID_DRIVER" choices:"MegaRaid|HPSARaid|Mpt2SAS|MarvelRaid"` + Driver string `help:"Raid driver" default:"$RAID_DRIVER" metavar:"RAID_DRIVER" choices:"MegaRaid|HPSARaid|Mpt2SAS|MarvelRaid"` LocalHost bool `help:"Run raidcli in localhost"` SUBCOMMAND string `help:"s3cli subcommand" subcommand:"true"` } diff --git a/go.mod b/go.mod index 58f02ab0b60..a050a5581b8 100644 --- a/go.mod +++ b/go.mod @@ -93,7 +93,7 @@ require ( k8s.io/cri-api v0.22.17 k8s.io/klog/v2 v2.2.0 moul.io/http2curl/v2 v2.3.0 - yunion.io/x/cloudmux v0.3.10-0-alpha.1.0.20240428085659-1415194185cf + yunion.io/x/cloudmux v0.3.10-0-alpha.1.0.20240429100059-f7a31bba5e42 yunion.io/x/executor v0.0.0-20230705125604-c5ac3141db32 yunion.io/x/jsonutils v1.0.1-0.20240203102553-4096f103b401 yunion.io/x/log v1.0.1-0.20240305175729-7cf2d6cd5a91 diff --git a/go.sum b/go.sum index be32e7882b1..16f9e91b358 100644 --- a/go.sum +++ b/go.sum @@ -1225,8 +1225,8 @@ sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -yunion.io/x/cloudmux v0.3.10-0-alpha.1.0.20240428085659-1415194185cf h1:6fJNf4K5NfCHr2WgcPzaF8jfw/YsNw9TilpEO8MeyZI= -yunion.io/x/cloudmux v0.3.10-0-alpha.1.0.20240428085659-1415194185cf/go.mod h1:PkfAKJcu5mic7jyJpT4GxZ8BKIVMa+6eyOP2G94VKAs= 
+yunion.io/x/cloudmux v0.3.10-0-alpha.1.0.20240429100059-f7a31bba5e42 h1:rtPLQInqDukZELOGosWtx0+Ffl3rSBW3JKpyr5C77E4= +yunion.io/x/cloudmux v0.3.10-0-alpha.1.0.20240429100059-f7a31bba5e42/go.mod h1:PkfAKJcu5mic7jyJpT4GxZ8BKIVMa+6eyOP2G94VKAs= yunion.io/x/executor v0.0.0-20230705125604-c5ac3141db32 h1:v7POYkQwo1XzOxBoIoRVr/k0V9Y5JyjpshlIFa9raug= yunion.io/x/executor v0.0.0-20230705125604-c5ac3141db32/go.mod h1:Uxuou9WQIeJXNpy7t2fPLL0BYLvLiMvGQwY7Qc6aSws= yunion.io/x/jsonutils v0.0.0-20190625054549-a964e1e8a051/go.mod h1:4N0/RVzsYL3kH3WE/H1BjUQdFiWu50JGCFQuuy+Z634= diff --git a/pkg/apigateway/handler/auth.go b/pkg/apigateway/handler/auth.go index 3e96e8faac5..3b3e1588403 100644 --- a/pkg/apigateway/handler/auth.go +++ b/pkg/apigateway/handler/auth.go @@ -34,7 +34,6 @@ import ( "yunion.io/x/onecloud/pkg/apigateway/options" policytool "yunion.io/x/onecloud/pkg/apigateway/policy" agapi "yunion.io/x/onecloud/pkg/apis/apigateway" - "yunion.io/x/onecloud/pkg/apis/compute" "yunion.io/x/onecloud/pkg/appsrv" "yunion.io/x/onecloud/pkg/cloudcommon/policy" "yunion.io/x/onecloud/pkg/httperrors" @@ -1070,25 +1069,6 @@ func getUserInfo2(s *mcclient.ClientSession, uid string, pid string, loginIp str menus.Add(item) } - log.Infof("getUserInfo modules.Hosts.Get") - // s2 := auth.GetSession(ctx, token, FetchRegion(req), "v2") - params := jsonutils.NewDict() - params.Add(jsonutils.NewString("host_type"), "field") - params.Add(jsonutils.NewString("system"), "scope") - params.Add(jsonutils.JSONTrue, "usable") - params.Add(jsonutils.JSONTrue, "show_emulated") - cap, err := compute_modules.Hosts.Get(s, "distinct-field", params) - if err != nil { - log.Errorf("modules.Servers.Get distinct-field fail %s", err) - } else { - hostTypes, _ := jsonutils.GetStringArray(cap, "host_type") - hypervisors := make([]string, len(hostTypes)) - for i, hostType := range hostTypes { - hypervisors[i] = compute.HOSTTYPE_HYPERVISOR[hostType] - } - data.Add(jsonutils.NewStringArray(hypervisors), "hypervisors") - 
} - data.Add(menus, "menus") data.Add(k8s, "k8sdashboard") data.Add(services, "services") diff --git a/pkg/apis/compute/api.go b/pkg/apis/compute/api.go index f3ea07d580f..2b90f0b2e6e 100644 --- a/pkg/apis/compute/api.go +++ b/pkg/apis/compute/api.go @@ -350,6 +350,9 @@ type ServerConfigs struct { // default: kvm Hypervisor string `json:"hypervisor"` + // swagger: ignore + Provider string `json:"provider"` + // 包年包月资源池 // swagger:ignore // emum: shared, prepaid, dedicated diff --git a/pkg/apis/compute/guest_const.go b/pkg/apis/compute/guest_const.go index c5032677565..6f791e5dc14 100644 --- a/pkg/apis/compute/guest_const.go +++ b/pkg/apis/compute/guest_const.go @@ -205,7 +205,6 @@ const ( HYPERVISOR_CTYUN = compute.HYPERVISOR_CTYUN HYPERVISOR_ECLOUD = compute.HYPERVISOR_ECLOUD HYPERVISOR_JDCLOUD = compute.HYPERVISOR_JDCLOUD - HYPERVISOR_CLOUDPODS = compute.HYPERVISOR_CLOUDPODS HYPERVISOR_NUTANIX = compute.HYPERVISOR_NUTANIX HYPERVISOR_BINGO_CLOUD = compute.HYPERVISOR_BINGO_CLOUD HYPERVISOR_INCLOUD_SPHERE = compute.HYPERVISOR_INCLOUD_SPHERE @@ -284,7 +283,6 @@ var HYPERVISORS = []string{ HYPERVISOR_CTYUN, HYPERVISOR_ECLOUD, HYPERVISOR_JDCLOUD, - HYPERVISOR_CLOUDPODS, HYPERVISOR_NUTANIX, HYPERVISOR_BINGO_CLOUD, HYPERVISOR_INCLOUD_SPHERE, @@ -298,121 +296,6 @@ var HYPERVISORS = []string{ HYPERVISOR_ORACLE, } -var ONECLOUD_HYPERVISORS = []string{ - HYPERVISOR_BAREMETAL, - HYPERVISOR_KVM, - HYPERVISOR_POD, -} - -var PUBLIC_CLOUD_HYPERVISORS = []string{ - HYPERVISOR_ALIYUN, - HYPERVISOR_AWS, - HYPERVISOR_AZURE, - HYPERVISOR_QCLOUD, - HYPERVISOR_HUAWEI, - HYPERVISOR_UCLOUD, - HYPERVISOR_VOLCENGINE, - HYPERVISOR_GOOGLE, - HYPERVISOR_CTYUN, - HYPERVISOR_ECLOUD, - HYPERVISOR_JDCLOUD, - HYPERVISOR_KSYUN, - HYPERVISOR_BAIDU, - HYPERVISOR_CUCLOUD, - HYPERVISOR_QINGCLOUD, - HYPERVISOR_ORACLE, -} - -var PRIVATE_CLOUD_HYPERVISORS = []string{ - HYPERVISOR_ZSTACK, - HYPERVISOR_OPENSTACK, - HYPERVISOR_APSARA, - HYPERVISOR_CLOUDPODS, - HYPERVISOR_HCSO, - HYPERVISOR_HCS, - 
HYPERVISOR_HCSOP, - HYPERVISOR_NUTANIX, - HYPERVISOR_BINGO_CLOUD, - HYPERVISOR_INCLOUD_SPHERE, - HYPERVISOR_PROXMOX, - HYPERVISOR_REMOTEFILE, - HYPERVISOR_H3C, -} - -// var HYPERVISORS = []string{HYPERVISOR_ALIYUN} - -var HYPERVISOR_HOSTTYPE = map[string]string{ - HYPERVISOR_KVM: HOST_TYPE_HYPERVISOR, - HYPERVISOR_BAREMETAL: HOST_TYPE_BAREMETAL, - HYPERVISOR_ESXI: HOST_TYPE_ESXI, - HYPERVISOR_POD: HOST_TYPE_CONTAINER, - HYPERVISOR_ALIYUN: HOST_TYPE_ALIYUN, - HYPERVISOR_APSARA: HOST_TYPE_APSARA, - HYPERVISOR_AZURE: HOST_TYPE_AZURE, - HYPERVISOR_AWS: HOST_TYPE_AWS, - HYPERVISOR_QCLOUD: HOST_TYPE_QCLOUD, - HYPERVISOR_HUAWEI: HOST_TYPE_HUAWEI, - HYPERVISOR_HCSO: HOST_TYPE_HCSO, - HYPERVISOR_HCSOP: HOST_TYPE_HCSOP, - HYPERVISOR_HCS: HOST_TYPE_HCS, - HYPERVISOR_OPENSTACK: HOST_TYPE_OPENSTACK, - HYPERVISOR_UCLOUD: HOST_TYPE_UCLOUD, - HYPERVISOR_VOLCENGINE: HOST_TYPE_VOLCENGINE, - HYPERVISOR_ZSTACK: HOST_TYPE_ZSTACK, - HYPERVISOR_GOOGLE: HOST_TYPE_GOOGLE, - HYPERVISOR_CTYUN: HOST_TYPE_CTYUN, - HYPERVISOR_ECLOUD: HOST_TYPE_ECLOUD, - HYPERVISOR_JDCLOUD: HOST_TYPE_JDCLOUD, - HYPERVISOR_CLOUDPODS: HOST_TYPE_CLOUDPODS, - HYPERVISOR_NUTANIX: HOST_TYPE_NUTANIX, - HYPERVISOR_BINGO_CLOUD: HOST_TYPE_BINGO_CLOUD, - HYPERVISOR_INCLOUD_SPHERE: HOST_TYPE_INCLOUD_SPHERE, - HYPERVISOR_PROXMOX: HOST_TYPE_PROXMOX, - HYPERVISOR_REMOTEFILE: HOST_TYPE_REMOTEFILE, - HYPERVISOR_H3C: HOST_TYPE_H3C, - HYPERVISOR_KSYUN: HOST_TYPE_KSYUN, - HYPERVISOR_BAIDU: HOST_TYPE_BAIDU, - HYPERVISOR_CUCLOUD: HOST_TYPE_CUCLOUD, - HYPERVISOR_QINGCLOUD: HOST_TYPE_QINGCLOUD, - HYPERVISOR_ORACLE: HOST_TYPE_ORACLE, -} - -var HOSTTYPE_HYPERVISOR = map[string]string{ - HOST_TYPE_HYPERVISOR: HYPERVISOR_KVM, - HOST_TYPE_BAREMETAL: HYPERVISOR_BAREMETAL, - HOST_TYPE_ESXI: HYPERVISOR_ESXI, - HOST_TYPE_CONTAINER: HYPERVISOR_POD, - HOST_TYPE_ALIYUN: HYPERVISOR_ALIYUN, - HOST_TYPE_APSARA: HYPERVISOR_APSARA, - HOST_TYPE_AZURE: HYPERVISOR_AZURE, - HOST_TYPE_AWS: HYPERVISOR_AWS, - HOST_TYPE_QCLOUD: HYPERVISOR_QCLOUD, - 
HOST_TYPE_HUAWEI: HYPERVISOR_HUAWEI, - HOST_TYPE_HCSO: HYPERVISOR_HCSO, - HOST_TYPE_HCSOP: HYPERVISOR_HCSOP, - HOST_TYPE_HCS: HYPERVISOR_HCS, - HOST_TYPE_OPENSTACK: HYPERVISOR_OPENSTACK, - HOST_TYPE_UCLOUD: HYPERVISOR_UCLOUD, - HOST_TYPE_VOLCENGINE: HYPERVISOR_VOLCENGINE, - HOST_TYPE_ZSTACK: HYPERVISOR_ZSTACK, - HOST_TYPE_GOOGLE: HYPERVISOR_GOOGLE, - HOST_TYPE_CTYUN: HYPERVISOR_CTYUN, - HOST_TYPE_ECLOUD: HYPERVISOR_ECLOUD, - HOST_TYPE_JDCLOUD: HYPERVISOR_JDCLOUD, - HOST_TYPE_CLOUDPODS: HYPERVISOR_CLOUDPODS, - HOST_TYPE_NUTANIX: HYPERVISOR_NUTANIX, - HOST_TYPE_BINGO_CLOUD: HYPERVISOR_BINGO_CLOUD, - HOST_TYPE_INCLOUD_SPHERE: HYPERVISOR_INCLOUD_SPHERE, - HOST_TYPE_PROXMOX: HYPERVISOR_PROXMOX, - HOST_TYPE_REMOTEFILE: HYPERVISOR_REMOTEFILE, - HOST_TYPE_H3C: HYPERVISOR_H3C, - HOST_TYPE_KSYUN: HYPERVISOR_KSYUN, - HOST_TYPE_BAIDU: HYPERVISOR_BAIDU, - HOST_TYPE_CUCLOUD: HYPERVISOR_CUCLOUD, - HOST_TYPE_QINGCLOUD: HYPERVISOR_QINGCLOUD, - HOST_TYPE_ORACLE: HYPERVISOR_ORACLE, -} - const ( VM_DEFAULT_WINDOWS_LOGIN_USER = compute.VM_DEFAULT_WINDOWS_LOGIN_USER VM_DEFAULT_LINUX_LOGIN_USER = compute.VM_DEFAULT_LINUX_LOGIN_USER @@ -440,14 +323,6 @@ const ( VM_METADATA_START_VCPU_COUNT = "start_vcpu_count" ) -func Hypervisors2HostTypes(hypervisors []string) []string { - hostTypes := make([]string, len(hypervisors)) - for i := range hypervisors { - hostTypes[i] = HYPERVISOR_HOSTTYPE[hypervisors[i]] - } - return hostTypes -} - // windows allow a maximal length of 15 // http://support.microsoft.com/kb/909264 const MAX_WINDOWS_COMPUTER_NAME_LENGTH = 15 diff --git a/pkg/compute/guestdrivers/baremetals.go b/pkg/compute/guestdrivers/baremetals.go index 95bb8d173b7..6da2defb0cb 100644 --- a/pkg/compute/guestdrivers/baremetals.go +++ b/pkg/compute/guestdrivers/baremetals.go @@ -184,7 +184,12 @@ func (self *SBaremetalGuestDriver) Attach2RandomNetwork(guest *models.SGuest, ct netsAvaiable := make([]models.SNetwork, 0) netifIndexs := make(map[string][]models.SNetInterface, 0) - netTypes := 
guest.GetDriver().GetRandomNetworkTypes() + drv, err := guest.GetDriver() + if err != nil { + return nil, err + } + + netTypes := drv.GetRandomNetworkTypes() if len(netConfig.NetType) > 0 { netTypes = []string{netConfig.NetType} } diff --git a/pkg/compute/guestdrivers/base.go b/pkg/compute/guestdrivers/base.go index 15c1eed2e9b..8fb785475b2 100644 --- a/pkg/compute/guestdrivers/base.go +++ b/pkg/compute/guestdrivers/base.go @@ -565,8 +565,13 @@ func (base *SBaseGuestDriver) ValidateGuestChangeConfigInput(ctx context.Context confs.Old.CpuSockets = guest.CpuSockets confs.Old.VmemSize = guest.VmemSize + region, err := guest.GetRegion() + if err != nil { + return nil, err + } + if len(input.InstanceType) > 0 { - sku, err := models.ServerSkuManager.FetchSkuByNameAndProvider(input.InstanceType, guest.GetDriver().GetProvider(), true) + sku, err := models.ServerSkuManager.FetchSkuByNameAndProvider(input.InstanceType, region.Provider, true) if err != nil { return nil, errors.Wrap(err, "FetchSkuByNameAndProvider") } diff --git a/pkg/compute/guestdrivers/cloudpods-baremetals.go b/pkg/compute/guestdrivers/cloudpods-baremetals.go new file mode 100644 index 00000000000..01464ff956a --- /dev/null +++ b/pkg/compute/guestdrivers/cloudpods-baremetals.go @@ -0,0 +1,567 @@ +// Copyright 2019 Yunion +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package guestdrivers + +import ( + "context" + "fmt" + "path/filepath" + "regexp" + "time" + + "yunion.io/x/cloudmux/pkg/cloudprovider" + "yunion.io/x/jsonutils" + "yunion.io/x/log" + "yunion.io/x/pkg/errors" + "yunion.io/x/pkg/util/rbacscope" + "yunion.io/x/pkg/utils" + + api "yunion.io/x/onecloud/pkg/apis/compute" + "yunion.io/x/onecloud/pkg/cloudcommon/db" + "yunion.io/x/onecloud/pkg/cloudcommon/db/quotas" + "yunion.io/x/onecloud/pkg/cloudcommon/db/taskman" + "yunion.io/x/onecloud/pkg/compute/baremetal" + "yunion.io/x/onecloud/pkg/compute/models" + "yunion.io/x/onecloud/pkg/compute/options" + "yunion.io/x/onecloud/pkg/httperrors" + "yunion.io/x/onecloud/pkg/mcclient" + "yunion.io/x/onecloud/pkg/mcclient/cloudpods" +) + +type SCloudpodsBaremetalGuestDriver struct { + SManagedVirtualizedGuestDriver +} + +func init() { + driver := SCloudpodsBaremetalGuestDriver{} + models.RegisterGuestDriver(&driver) +} + +func (self *SCloudpodsBaremetalGuestDriver) GetHypervisor() string { + return api.HYPERVISOR_BAREMETAL +} + +func (self *SCloudpodsBaremetalGuestDriver) GetProvider() string { + return api.CLOUD_PROVIDER_CLOUDPODS +} + +func (self *SCloudpodsBaremetalGuestDriver) GetInstanceCapability() cloudprovider.SInstanceCapability { + return cloudprovider.SInstanceCapability{ + Hypervisor: self.GetHypervisor(), + Provider: self.GetProvider(), + } +} + +func (self *SCloudpodsBaremetalGuestDriver) GetComputeQuotaKeys(scope rbacscope.TRbacScope, ownerId mcclient.IIdentityProvider, brand string) models.SComputeResourceKeys { + keys := models.SComputeResourceKeys{} + keys.SBaseProjectQuotaKeys = quotas.OwnerIdProjectQuotaKeys(scope, ownerId) + keys.CloudEnv = api.CLOUD_ENV_PRIVATE_CLOUD + keys.Provider = api.CLOUD_PROVIDER_CLOUDPODS + keys.Brand = api.CLOUD_PROVIDER_CLOUDPODS + keys.Hypervisor = api.HYPERVISOR_BAREMETAL + return keys +} + +func (self *SCloudpodsBaremetalGuestDriver) GetDefaultSysDiskBackend() string { + return api.STORAGE_LOCAL +} + +func (self 
*SCloudpodsBaremetalGuestDriver) GetMinimalSysDiskSizeGb() int { + return options.Options.DefaultDiskSizeMB / 1024 +} + +func (self *SCloudpodsBaremetalGuestDriver) GetMaxSecurityGroupCount() int { + //暂不支持绑定安全组 + return 0 +} + +func (self *SCloudpodsBaremetalGuestDriver) GetMaxVCpuCount() int { + return 1024 +} + +func (self *SCloudpodsBaremetalGuestDriver) GetMaxVMemSizeGB() int { + return 4096 +} + +func (self *SCloudpodsBaremetalGuestDriver) PrepareDiskRaidConfig(userCred mcclient.TokenCredential, host *models.SHost, confs []*api.BaremetalDiskConfig, disks []*api.DiskConfig) ([]*api.DiskConfig, error) { + baremetalStorage := models.ConvertStorageInfo2BaremetalStorages(host.StorageInfo) + if baremetalStorage == nil { + return nil, fmt.Errorf("Convert storage info error") + } + if len(confs) == 0 { + parsedConf, _ := baremetal.ParseDiskConfig("") + confs = []*api.BaremetalDiskConfig{&parsedConf} + } + layouts, err := baremetal.CalculateLayout(confs, baremetalStorage) + if err != nil { + return nil, err + } + err = host.UpdateDiskConfig(userCred, layouts) + if err != nil { + return nil, err + } + allocable, extra := baremetal.CheckDisksAllocable(layouts, disks) + if !allocable { + return nil, fmt.Errorf("baremetal.CheckDisksAllocable not allocable") + } + return extra, nil +} + +func (self *SCloudpodsBaremetalGuestDriver) GetRebuildRootStatus() ([]string, error) { + return []string{api.VM_READY, api.VM_ADMIN}, nil +} + +func (self *SCloudpodsBaremetalGuestDriver) GetChangeConfigStatus(guest *models.SGuest) ([]string, error) { + return nil, httperrors.NewUnsupportOperationError("Cannot change config for baremtal") +} + +func (self *SCloudpodsBaremetalGuestDriver) GetDeployStatus() ([]string, error) { + return []string{api.VM_READY, api.VM_ADMIN}, nil +} + +func (self *SCloudpodsBaremetalGuestDriver) ValidateResizeDisk(guest *models.SGuest, disk *models.SDisk, storage *models.SStorage) error { + return httperrors.NewUnsupportOperationError("Cannot resize disk for 
baremtal") +} + +func (self *SCloudpodsBaremetalGuestDriver) GetNamedNetworkConfiguration(guest *models.SGuest, ctx context.Context, userCred mcclient.TokenCredential, host *models.SHost, netConfig *api.NetworkConfig) (*models.SNetwork, []models.SNicConfig, api.IPAllocationDirection, bool, error) { + netifs, net, err := host.GetNetinterfacesWithIdAndCredential(netConfig.Network, userCred, netConfig.Reserved) + if err != nil { + return nil, nil, "", false, errors.Wrap(err, "get host netinterfaces") + } + if netifs != nil { + nicCnt := 1 + if netConfig.RequireTeaming || netConfig.TryTeaming { + nicCnt = 2 + } + if len(netifs) < nicCnt { + if netConfig.RequireTeaming { + return net, nil, "", false, errors.Errorf("not enough network interfaces, want %d got %d", nicCnt, len(netifs)) + } + nicCnt = len(netifs) + } + nicConfs := make([]models.SNicConfig, 0) + for i := 0; i < nicCnt; i += 1 { + nicConf := models.SNicConfig{ + Mac: netifs[i].Mac, + Index: netifs[i].Index, + Ifname: "", + } + nicConfs = append(nicConfs, nicConf) + } + reuseAddr := false + hn := host.GetAttach2Network(netConfig.Network) + if hn != nil && options.Options.BaremetalServerReuseHostIp { + if netConfig.Address == "" || netConfig.Address == hn.IpAddr { + // try to reuse host network IP address + netConfig.Address = hn.IpAddr + reuseAddr = true + } + } + + return net, nicConfs, api.IPAllocationStepup, reuseAddr, nil + } + return net, nil, "", false, nil +} + +func (self *SCloudpodsBaremetalGuestDriver) GetRandomNetworkTypes() []string { + return []string{api.NETWORK_TYPE_BAREMETAL, api.NETWORK_TYPE_GUEST} +} + +func (self *SCloudpodsBaremetalGuestDriver) Attach2RandomNetwork(guest *models.SGuest, ctx context.Context, userCred mcclient.TokenCredential, host *models.SHost, netConfig *api.NetworkConfig, pendingUsage quotas.IQuota) ([]models.SGuestnetwork, error) { + netifs := host.GetHostNetInterfaces() + netsAvaiable := make([]models.SNetwork, 0) + netifIndexs := make(map[string][]models.SNetInterface, 
0) + + drv, err := guest.GetDriver() + if err != nil { + return nil, err + } + + netTypes := drv.GetRandomNetworkTypes() + if len(netConfig.NetType) > 0 { + netTypes = []string{netConfig.NetType} + } + var wirePattern *regexp.Regexp + if len(netConfig.Wire) > 0 { + wirePattern = regexp.MustCompile(netConfig.Wire) + } + for idx, netif := range netifs { + if !netif.IsUsableServernic() { + continue + } + wire := netif.GetWire() + if wire == nil { + continue + } + if wirePattern != nil && !wirePattern.MatchString(wire.Id) && !wirePattern.MatchString(wire.Name) { + continue + } + var net *models.SNetwork + if netConfig.Private { + net, _ = wire.GetCandidatePrivateNetwork(ctx, userCred, userCred, models.NetworkManager.AllowScope(userCred), netConfig.Exit, netTypes) + } else { + net, _ = wire.GetCandidateAutoAllocNetwork(ctx, userCred, userCred, models.NetworkManager.AllowScope(userCred), netConfig.Exit, netTypes) + } + if net != nil { + netsAvaiable = append(netsAvaiable, *net) + if _, exist := netifIndexs[net.WireId]; !exist { + netifIndexs[net.WireId] = make([]models.SNetInterface, 0) + } + netifIndexs[net.WireId] = append(netifIndexs[net.WireId], netifs[idx]) + } + } + if len(netsAvaiable) == 0 { + return nil, fmt.Errorf("No appropriate host virtual network...") + } + net := models.ChooseCandidateNetworks(netsAvaiable, netConfig.Exit, netTypes) + if net != nil { + netifs := netifIndexs[net.WireId] + nicConfs := make([]models.SNicConfig, 0) + nicCnt := 1 + if netConfig.RequireTeaming || netConfig.TryTeaming { + nicCnt = 2 + } + if len(netifs) < nicCnt { + if netConfig.RequireTeaming { + return nil, fmt.Errorf("not enough network interfaces, want %d got %d", nicCnt, len(netifs)) + } + nicCnt = len(netifs) + } + for i := 0; i < nicCnt; i += 1 { + nicConf := models.SNicConfig{ + Mac: netifs[i].Mac, + Index: netifs[i].Index, + Ifname: "", + } + nicConfs = append(nicConfs, nicConf) + } + address := "" + reuseAddr := false + hn := host.GetAttach2Network(net.Id) + if hn != 
nil && options.Options.BaremetalServerReuseHostIp { + // try to reuse host network IP address + address = hn.IpAddr + reuseAddr = true + } + return guest.Attach2Network(ctx, userCred, models.Attach2NetworkArgs{ + Network: net, + PendingUsage: pendingUsage, + IpAddr: address, + NicDriver: netConfig.Driver, + BwLimit: netConfig.BwLimit, + Virtual: netConfig.Vip, + TryReserved: false, + AllocDir: api.IPAllocationStepup, + RequireDesignatedIP: false, + UseDesignatedIP: reuseAddr, + NicConfs: nicConfs, + + IsDefault: netConfig.IsDefault, + }) + } + return nil, fmt.Errorf("No appropriate host virtual network...") +} + +func (self *SCloudpodsBaremetalGuestDriver) GetStorageTypes() []string { + return []string{ + api.STORAGE_BAREMETAL, + } +} + +func (self *SCloudpodsBaremetalGuestDriver) ChooseHostStorage(host *models.SHost, guest *models.SGuest, diskConfig *api.DiskConfig, storageIds []string) (*models.SStorage, error) { + if len(storageIds) != 0 { + return models.StorageManager.FetchStorageById(storageIds[0]), nil + } + bs := host.GetBaremetalstorage() + if bs == nil { + return nil, nil + } + return bs.GetStorage(), nil +} + +func (self *SCloudpodsBaremetalGuestDriver) RequestGuestCreateAllDisks(ctx context.Context, guest *models.SGuest, task taskman.ITask) error { + diskCat := guest.CategorizeDisks() + var imageId string + if diskCat.Root != nil { + imageId = diskCat.Root.GetTemplateId() + } + if len(imageId) == 0 { + task.ScheduleRun(nil) + return nil + } + storage, _ := diskCat.Root.GetStorage() + if storage == nil { + return fmt.Errorf("no valid storage") + } + storageCache := storage.GetStoragecache() + if storageCache == nil { + return fmt.Errorf("no valid storage cache") + } + input := api.CacheImageInput{ + ImageId: imageId, + Format: "qcow2", + ParentTaskId: task.GetTaskId(), + } + return storageCache.StartImageCacheTask(ctx, task.GetUserCred(), input) +} + +func (self *SCloudpodsBaremetalGuestDriver) NeedRequestGuestHotAddIso(ctx context.Context, guest 
*models.SGuest) bool { + return true +} + +func (self *SCloudpodsBaremetalGuestDriver) RequestGuestHotAddIso(ctx context.Context, guest *models.SGuest, path string, boot bool, task taskman.ITask) error { + host, _ := guest.GetHost() + return host.StartInsertIsoTask(ctx, task.GetUserCred(), filepath.Base(path), boot, task.GetTaskId()) +} + +func (self *SCloudpodsBaremetalGuestDriver) RequestGuestHotRemoveIso(ctx context.Context, guest *models.SGuest, task taskman.ITask) error { + host, _ := guest.GetHost() + return host.StartEjectIsoTask(ctx, task.GetUserCred(), task.GetTaskId()) +} + +func (self *SCloudpodsBaremetalGuestDriver) RequestGuestCreateInsertIso(ctx context.Context, imageId string, bootIndex *int8, task taskman.ITask, guest *models.SGuest) error { + return guest.StartInsertIsoTask(ctx, 0, imageId, true, nil, guest.HostId, task.GetUserCred(), task.GetTaskId()) +} + +func (self *SCloudpodsBaremetalGuestDriver) ValidateCreateData(ctx context.Context, userCred mcclient.TokenCredential, input *api.ServerCreateInput) (*api.ServerCreateInput, error) { + if len(input.BaremetalDiskConfigs) != 0 { + if err := baremetal.ValidateDiskConfigs(input.BaremetalDiskConfigs); err != nil { + return nil, httperrors.NewInputParameterError("Invalid raid config: %v", err) + } + } + return input, nil +} + +func (self *SCloudpodsBaremetalGuestDriver) ValidateCreateDataOnHost(ctx context.Context, userCred mcclient.TokenCredential, bmName string, host *models.SHost, input *api.ServerCreateInput) (*api.ServerCreateInput, error) { + if host.HostType != api.HOST_TYPE_BAREMETAL || !host.IsBaremetal { + return nil, httperrors.NewInputParameterError("Host %s is not a baremetal", bmName) + } + if !utils.IsInStringArray(host.Status, []string{api.BAREMETAL_READY, api.BAREMETAL_RUNNING, api.BAREMETAL_START_CONVERT}) { + return nil, httperrors.NewInvalidStatusError("CloudpodsBaremetal %s is not ready", bmName) + } + if host.GetBaremetalServer() != nil { + return nil, 
httperrors.NewInsufficientResourceError("CloudpodsBaremetal %s is occupied", bmName) + } + input.VmemSize = host.MemSize + input.VcpuCount = int(host.CpuCount) + return input, nil +} + +func (self *SCloudpodsBaremetalGuestDriver) GetGuestVncInfo(ctx context.Context, userCred mcclient.TokenCredential, guest *models.SGuest, host *models.SHost, input *cloudprovider.ServerVncInput) (*cloudprovider.ServerVncOutput, error) { + ret := &cloudprovider.ServerVncOutput{} + ret.HostId = host.Id + zone, _ := host.GetZone() + ret.Zone = zone.Name + return ret, nil +} + +func (self *SCloudpodsBaremetalGuestDriver) RequestDeployGuestOnHost(ctx context.Context, guest *models.SGuest, host *models.SHost, task taskman.ITask) error { + config, err := guest.GetDeployConfigOnHost(ctx, task.GetUserCred(), host, task.GetParams()) + if err != nil { + log.Errorf("GetDeployConfigOnHost error: %v", err) + return err + } + val, _ := config.GetString("action") + if len(val) == 0 { + val = "deploy" + } + + desc := cloudprovider.SManagedVMCreateConfig{} + desc.Description = guest.Description + // 账号必须在desc.GetConfig()之前设置,避免默认用户不能正常注入 + osInfo := struct { + OsType string + OsDistribution string + ImageType string + }{} + config.Unmarshal(&osInfo, "desc") + + driver, err := guest.GetDriver() + if err != nil { + return errors.Wrapf(err, "GetDriver") + } + + desc.Account = driver.GetDefaultAccount(osInfo.OsType, osInfo.OsDistribution, osInfo.ImageType) + err = desc.GetConfig(config) + if err != nil { + return errors.Wrapf(err, "desc.GetConfig") + } + + log.Debugf("%s baremetal config: %s", val, jsonutils.Marshal(desc).String()) + + switch val { + case "create": + taskman.LocalTaskRun(task, func() (jsonutils.JSONObject, error) { + iHost, err := host.GetIHost(ctx) + if err != nil { + return nil, err + } + h := iHost.(*cloudpods.SHost) + opts := &api.ServerCreateInput{ + ServerConfigs: &api.ServerConfigs{}, + } + task.GetParams().Unmarshal(&opts.BaremetalDiskConfigs, "baremetal_disk_configs") + 
opts.Name = guest.Name + opts.Hostname = guest.Hostname + opts.Description = guest.Description + opts.InstanceType = guest.InstanceType + opts.VcpuCount = guest.VcpuCount + opts.VmemSize = guest.VmemSize + opts.Password = desc.Password + opts.Metadata, _ = guest.GetAllUserMetadata() + opts.UserData, _ = desc.GetUserData() + opts.Hypervisor = api.HYPERVISOR_BAREMETAL + networks := []*api.NetworkConfig{} + if len(desc.ExternalNetworkId) > 0 { + networks = append(networks, &api.NetworkConfig{ + Network: desc.ExternalNetworkId, + Address: desc.IpAddr, + }) + } + disks := []*api.DiskConfig{} + disks = append(disks, &api.DiskConfig{ + Index: 0, + ImageId: desc.ExternalImageId, + DiskType: api.DISK_TYPE_SYS, + SizeMb: desc.SysDisk.SizeGB * 1024, + Backend: desc.SysDisk.StorageType, + Storage: desc.SysDisk.StorageExternalId, + }) + for idx, disk := range desc.DataDisks { + info := &api.DiskConfig{ + Index: idx + 1, + DiskType: api.DISK_TYPE_DATA, + SizeMb: -1, + Backend: disk.StorageType, + Storage: disk.StorageExternalId, + } + if disk.SizeGB > 0 { + info.SizeMb = disk.SizeGB * 1024 + } + disks = append(disks, info) + } + opts.Disks = disks + opts.Networks = networks + if len(desc.ProjectId) > 0 { + opts.ProjectId = desc.ProjectId + } + + log.Debugf("create baremetal params: %s", jsonutils.Marshal(opts)) + + iVM, err := h.CreateBaremetalServer(opts) + if err != nil { + return nil, errors.Wrapf(err, "CreateBaremetalServer") + } + db.SetExternalId(guest, task.GetUserCred(), iVM.GetGlobalId()) + + vmId := iVM.GetGlobalId() + initialState := driver.GetGuestInitialStateAfterCreate() + log.Debugf("VMcreated %s, wait status %s ...", vmId, initialState) + err = cloudprovider.WaitStatusWithInstanceErrorCheck(iVM, initialState, time.Second*5, time.Second*1800, func() error { + return iVM.GetError() + }) + if err != nil { + return nil, err + } + log.Debugf("VMcreated %s, and status is running", vmId) + + iVM, err = iHost.GetIVMById(vmId) + if err != nil { + return nil, 
errors.Wrapf(err, "GetIVMById(%s)", vmId) + } + + data := fetchIVMinfo(desc, iVM, guest.Id, desc.Account, desc.Password, desc.PublicKey, "create") + return data, nil + }) + default: + return fmt.Errorf("Action %s not supported", val) + } + return nil +} + +func (self *SCloudpodsBaremetalGuestDriver) CanKeepDetachDisk() bool { + return false +} + +func (self *SCloudpodsBaremetalGuestDriver) RequestSyncConfigOnHost(ctx context.Context, guest *models.SGuest, host *models.SHost, task taskman.ITask) error { + return task.ScheduleRun(nil) +} + +func (self *SCloudpodsBaremetalGuestDriver) StartGuestDetachdiskTask(ctx context.Context, userCred mcclient.TokenCredential, guest *models.SGuest, params *jsonutils.JSONDict, parentTaskId string) error { + return fmt.Errorf("Cannot detach disk from a baremetal server") +} + +func (self *SCloudpodsBaremetalGuestDriver) StartGuestAttachDiskTask(ctx context.Context, userCred mcclient.TokenCredential, guest *models.SGuest, params *jsonutils.JSONDict, parentTaskId string) error { + return fmt.Errorf("Cannot attach disk to a baremetal server") +} + +func (self *SCloudpodsBaremetalGuestDriver) StartSuspendTask(ctx context.Context, userCred mcclient.TokenCredential, guest *models.SGuest, params *jsonutils.JSONDict, parentTaskId string) error { + return fmt.Errorf("Cannot suspend a baremetal server") +} + +func (self *SCloudpodsBaremetalGuestDriver) StartResumeTask(ctx context.Context, userCred mcclient.TokenCredential, guest *models.SGuest, params *jsonutils.JSONDict, parentTaskId string) error { + return fmt.Errorf("Cannot resume a baremetal server") +} + +func (self *SCloudpodsBaremetalGuestDriver) StartGuestSaveImage(ctx context.Context, userCred mcclient.TokenCredential, guest *models.SGuest, params *jsonutils.JSONDict, parentTaskId string) error { + return httperrors.NewUnsupportOperationError("Cannot save image for baremtal") +} + +func (self *SCloudpodsBaremetalGuestDriver) StartGuestSaveGuestImage(ctx context.Context, userCred 
mcclient.TokenCredential, + guest *models.SGuest, params *jsonutils.JSONDict, parentTaskId string) error { + + return httperrors.NewUnsupportOperationError("Cannot save image for baremtal") +} + +func (self *SCloudpodsBaremetalGuestDriver) StartGuestResetTask(guest *models.SGuest, ctx context.Context, userCred mcclient.TokenCredential, isHard bool, parentTaskId string) error { + task, err := taskman.TaskManager.NewTask(ctx, "BaremetalServerResetTask", guest, userCred, nil, "", parentTaskId, nil) + if err != nil { + return err + } + task.ScheduleRun(nil) + return nil +} + +func (self *SCloudpodsBaremetalGuestDriver) OnDeleteGuestFinalCleanup(ctx context.Context, guest *models.SGuest, userCred mcclient.TokenCredential) error { + err := guest.DeleteAllDisksInDB(ctx, userCred) + if err != nil { + return err + } + baremetal, _ := guest.GetHost() + if baremetal != nil { + return baremetal.UpdateDiskConfig(userCred, nil) + } + return nil +} + +func (self *SCloudpodsBaremetalGuestDriver) IsSupportGuestClone() bool { + return false +} + +func (self *SCloudpodsBaremetalGuestDriver) IsSupportCdrom(guest *models.SGuest) (bool, error) { + host, _ := guest.GetHost() + if host == nil { + return false, errors.Wrap(httperrors.ErrNotFound, "no host") + } + ipmiInfo, err := host.GetIpmiInfo() + if err != nil { + return false, errors.Wrap(err, "host.GetIpmiInfo") + } + return ipmiInfo.CdromBoot, nil +} + +func (self *SCloudpodsBaremetalGuestDriver) IsSupportFloppy(guest *models.SGuest) (bool, error) { + return false, nil +} diff --git a/pkg/compute/guestdrivers/cloudpods-esxi.go b/pkg/compute/guestdrivers/cloudpods-esxi.go new file mode 100644 index 00000000000..33ec949f202 --- /dev/null +++ b/pkg/compute/guestdrivers/cloudpods-esxi.go @@ -0,0 +1,894 @@ +// Copyright 2019 Yunion +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package guestdrivers + +import ( + "context" + "fmt" + "sort" + "strconv" + "time" + + "yunion.io/x/cloudmux/pkg/cloudprovider" + "yunion.io/x/cloudmux/pkg/multicloud/esxi" + "yunion.io/x/jsonutils" + "yunion.io/x/log" + "yunion.io/x/pkg/errors" + "yunion.io/x/pkg/util/billing" + "yunion.io/x/pkg/util/httputils" + "yunion.io/x/pkg/util/rbacscope" + "yunion.io/x/pkg/utils" + "yunion.io/x/sqlchemy" + + api "yunion.io/x/onecloud/pkg/apis/compute" + "yunion.io/x/onecloud/pkg/cloudcommon/db" + "yunion.io/x/onecloud/pkg/cloudcommon/db/quotas" + "yunion.io/x/onecloud/pkg/cloudcommon/db/taskman" + "yunion.io/x/onecloud/pkg/compute/models" + "yunion.io/x/onecloud/pkg/compute/options" + "yunion.io/x/onecloud/pkg/httperrors" + "yunion.io/x/onecloud/pkg/mcclient" +) + +type SCloudpodsESXiGuestDriver struct { + SManagedVirtualizedGuestDriver +} + +func init() { + driver := SCloudpodsESXiGuestDriver{} + models.RegisterGuestDriver(&driver) +} + +func (self *SCloudpodsESXiGuestDriver) DoScheduleCPUFilter() bool { return true } + +func (self *SCloudpodsESXiGuestDriver) DoScheduleMemoryFilter() bool { return true } + +func (self *SCloudpodsESXiGuestDriver) DoScheduleSKUFilter() bool { return false } + +func (self *SCloudpodsESXiGuestDriver) DoScheduleStorageFilter() bool { return true } + +func (self *SCloudpodsESXiGuestDriver) GetHypervisor() string { + return api.HYPERVISOR_ESXI +} + +func (self *SCloudpodsESXiGuestDriver) GetProvider() string { + return api.CLOUD_PROVIDER_CLOUDPODS +} + +func (self *SCloudpodsESXiGuestDriver) GetInstanceCapability() 
cloudprovider.SInstanceCapability { + return cloudprovider.SInstanceCapability{ + Hypervisor: self.GetHypervisor(), + Provider: self.GetProvider(), + DefaultAccount: cloudprovider.SDefaultAccount{ + Linux: cloudprovider.SOsDefaultAccount{ + DefaultAccount: api.VM_DEFAULT_LINUX_LOGIN_USER, + Changeable: true, + }, + Windows: cloudprovider.SOsDefaultAccount{ + DefaultAccount: api.VM_DEFAULT_WINDOWS_LOGIN_USER, + Changeable: false, + }, + }, + Storages: cloudprovider.Storage{ + SysDisk: []cloudprovider.StorageInfo{ + {StorageType: api.STORAGE_LOCAL, MinSizeGb: options.Options.LocalSysDiskMinSizeGB, MaxSizeGb: options.Options.LocalSysDiskMaxSizeGB, StepSizeGb: 1, Resizable: true}, + {StorageType: api.STORAGE_NAS, MinSizeGb: options.Options.LocalSysDiskMinSizeGB, MaxSizeGb: options.Options.LocalSysDiskMaxSizeGB, StepSizeGb: 1, Resizable: true}, + {StorageType: api.STORAGE_NFS, MinSizeGb: options.Options.LocalSysDiskMinSizeGB, MaxSizeGb: options.Options.LocalSysDiskMaxSizeGB, StepSizeGb: 1, Resizable: true}, + {StorageType: api.STORAGE_VSAN, MinSizeGb: options.Options.LocalSysDiskMinSizeGB, MaxSizeGb: options.Options.LocalSysDiskMaxSizeGB, StepSizeGb: 1, Resizable: true}, + {StorageType: api.STORAGE_CIFS, MinSizeGb: options.Options.LocalSysDiskMinSizeGB, MaxSizeGb: options.Options.LocalSysDiskMaxSizeGB, StepSizeGb: 1, Resizable: true}, + }, + DataDisk: []cloudprovider.StorageInfo{ + {StorageType: api.STORAGE_LOCAL, MinSizeGb: options.Options.LocalDataDiskMinSizeGB, MaxSizeGb: options.Options.LocalDataDiskMaxSizeGB, StepSizeGb: 1, Resizable: true}, + {StorageType: api.STORAGE_NAS, MinSizeGb: options.Options.LocalDataDiskMinSizeGB, MaxSizeGb: options.Options.LocalDataDiskMaxSizeGB, StepSizeGb: 1, Resizable: true}, + {StorageType: api.STORAGE_NFS, MinSizeGb: options.Options.LocalDataDiskMinSizeGB, MaxSizeGb: options.Options.LocalDataDiskMaxSizeGB, StepSizeGb: 1, Resizable: true}, + {StorageType: api.STORAGE_VSAN, MinSizeGb: options.Options.LocalDataDiskMinSizeGB, MaxSizeGb: 
options.Options.LocalDataDiskMaxSizeGB, StepSizeGb: 1, Resizable: true}, + {StorageType: api.STORAGE_CIFS, MinSizeGb: options.Options.LocalDataDiskMinSizeGB, MaxSizeGb: options.Options.LocalDataDiskMaxSizeGB, StepSizeGb: 1, Resizable: true}, + }, + }, + } +} + +func (self *SCloudpodsESXiGuestDriver) GetComputeQuotaKeys(scope rbacscope.TRbacScope, ownerId mcclient.IIdentityProvider, brand string) models.SComputeResourceKeys { + keys := models.SComputeResourceKeys{} + keys.SBaseProjectQuotaKeys = quotas.OwnerIdProjectQuotaKeys(scope, ownerId) + keys.CloudEnv = api.CLOUD_ENV_PRIVATE_CLOUD + keys.Provider = api.CLOUD_PROVIDER_CLOUDPODS + keys.Brand = api.CLOUD_PROVIDER_CLOUDPODS + keys.Hypervisor = api.HYPERVISOR_ESXI + return keys +} + +func (self *SCloudpodsESXiGuestDriver) GetDefaultSysDiskBackend() string { + return "" +} + +func (self *SCloudpodsESXiGuestDriver) ChooseHostStorage(host *models.SHost, guest *models.SGuest, diskConfig *api.DiskConfig, storageIds []string) (*models.SStorage, error) { + switch { + case !options.Options.LockStorageFromCachedimage: + return self.SVirtualizedGuestDriver.ChooseHostStorage(host, guest, diskConfig, storageIds) + case len(diskConfig.ImageId) > 0: + var ( + image *cloudprovider.SImage + err error + ) + obj, err := models.CachedimageManager.FetchById(diskConfig.ImageId) + if err != nil { + return nil, errors.Wrapf(err, "unable to fetch cachedimage %s", diskConfig.ImageId) + } + cachedimage := obj.(*models.SCachedimage) + if len(cachedimage.ExternalId) > 0 || cloudprovider.TImageType(cachedimage.ImageType) != cloudprovider.ImageTypeSystem { + return self.SVirtualizedGuestDriver.ChooseHostStorage(host, guest, diskConfig, storageIds) + } + storages, err := cachedimage.GetStorages() + if err != nil { + return nil, errors.Wrapf(err, "unable to GetStorages of cachedimage %s", diskConfig.ImageId) + } + if len(storages) == 0 { + log.Warningf("there no storage associated with cachedimage %q", image.Id) + return 
self.SVirtualizedGuestDriver.ChooseHostStorage(host, guest, diskConfig, storageIds) + } + if len(storages) > 1 { + log.Warningf("there are multiple storageCache associated with caheimage %q", image.Id) + } + wantStorageIds := make([]string, len(storages)) + for i := range wantStorageIds { + wantStorageIds[i] = storages[i].GetId() + } + for i := range wantStorageIds { + if utils.IsInStringArray(wantStorageIds[i], storageIds) { + log.Infof("use storage %q in where cachedimage %q", wantStorageIds[i], image.Id) + return &storages[i], nil + } + } + return self.SVirtualizedGuestDriver.ChooseHostStorage(host, guest, diskConfig, storageIds) + default: + ispId := guest.GetMetadata(context.Background(), "__base_instance_snapshot_id", nil) + if len(ispId) == 0 { + return self.SVirtualizedGuestDriver.ChooseHostStorage(host, guest, diskConfig, storageIds) + } + obj, err := models.InstanceSnapshotManager.FetchById(ispId) + if err != nil { + return nil, errors.Wrapf(err, "unable to fetch InstanceSnapshot %q", ispId) + } + isp := obj.(*models.SInstanceSnapshot) + ispGuest, err := isp.GetGuest() + if err != nil { + return nil, errors.Wrapf(err, "unable to fetch Guest of InstanceSnapshot %q", ispId) + } + storages, err := ispGuest.GetStorages() + if err != nil { + return nil, errors.Wrapf(err, "GetStorages") + } + if len(storages) == 0 { + return self.SVirtualizedGuestDriver.ChooseHostStorage(host, guest, diskConfig, storageIds) + } + if utils.IsInStringArray(storages[0].GetId(), storageIds) { + return &storages[0], nil + } + return self.SVirtualizedGuestDriver.ChooseHostStorage(host, guest, diskConfig, storageIds) + } +} + +func (self *SCloudpodsESXiGuestDriver) GetGuestVncInfo(ctx context.Context, userCred mcclient.TokenCredential, guest *models.SGuest, host *models.SHost, input *cloudprovider.ServerVncInput) (*cloudprovider.ServerVncOutput, error) { + iVM, err := guest.GetIVM(ctx) + if err != nil { + return nil, err + } + return iVM.GetVNCInfo(input) +} + +func (self 
*SCloudpodsESXiGuestDriver) GetMinimalSysDiskSizeGb() int { + return options.Options.DefaultDiskSizeMB / 1024 +} + +func (self *SCloudpodsESXiGuestDriver) RequestSyncSecgroupsOnHost(ctx context.Context, guest *models.SGuest, host *models.SHost, task taskman.ITask) error { + return nil // do nothing, not support securitygroup +} + +func (self *SCloudpodsESXiGuestDriver) GetMaxSecurityGroupCount() int { + //暂不支持绑定安全组 + return 0 +} + +func (self *SCloudpodsESXiGuestDriver) GetDetachDiskStatus() ([]string, error) { + return []string{api.VM_READY, api.VM_RUNNING}, nil +} + +func (self *SCloudpodsESXiGuestDriver) GetAttachDiskStatus() ([]string, error) { + return []string{api.VM_READY, api.VM_RUNNING}, nil +} + +func (self *SCloudpodsESXiGuestDriver) GetChangeConfigStatus(guest *models.SGuest) ([]string, error) { + return []string{api.VM_READY, api.VM_RUNNING}, nil +} + +func (self *SCloudpodsESXiGuestDriver) CanKeepDetachDisk() bool { + return false +} + +// func (self *SCloudpodsESXiGuestDriver) RequestDeleteDetachedDisk(ctx context.Context, disk *models.SDisk, task taskman.ITask, isPurge bool) error { +// err := disk.RealDelete(ctx, task.GetUserCred()) +// if err != nil { +// return err +// } +// task.ScheduleRun(nil) +// return nil +// } + +func (self *SCloudpodsESXiGuestDriver) RequestGuestHotAddIso(ctx context.Context, guest *models.SGuest, path string, boot bool, task taskman.ITask) error { + task.ScheduleRun(nil) + return nil +} + +func (self *SCloudpodsESXiGuestDriver) GetRebuildRootStatus() ([]string, error) { + return []string{api.VM_READY}, nil +} + +func (self *SCloudpodsESXiGuestDriver) GetDeployStatus() ([]string, error) { + return []string{api.VM_READY}, nil +} + +func (self *SCloudpodsESXiGuestDriver) ValidateCreateData(ctx context.Context, userCred mcclient.TokenCredential, data *api.ServerCreateInput) (*api.ServerCreateInput, error) { + for i := 0; i < len(data.Disks); i++ { + data.Disks[i].Format = "vmdk" + } + + if data.CpuSockets > data.VcpuCount { 
+ return nil, httperrors.NewInputParameterError("The number of cpu sockets cannot be greater than the number of cpus") + } + + // check disk config + if len(data.Disks) == 0 { + return data, nil + } + rootDisk := data.Disks[0] + if len(rootDisk.ImageId) == 0 { + return data, nil + } + image, err := models.CachedimageManager.GetImageInfo(ctx, userCred, rootDisk.ImageId, false) + if err != nil { + return nil, errors.Wrapf(err, "unable to GetImageInfo of image %q", rootDisk.ImageId) + } + if len(image.SubImages) <= 1 { + return data, nil + } + sort.Slice(image.SubImages, func(i, j int) bool { + return image.SubImages[i].Index < image.SubImages[j].Index + }) + newDataDisks := make([]*api.DiskConfig, 0, len(image.SubImages)+len(data.Disks)-1) + for i, subImage := range image.SubImages { + nDataDisk := *rootDisk + nDataDisk.SizeMb = subImage.MinDiskMB + nDataDisk.Format = "vmdk" + nDataDisk.Index = i + if i > 0 { + nDataDisk.ImageId = "" + } + newDataDisks = append(newDataDisks, &nDataDisk) + } + for i := 1; i < len(data.Disks); i++ { + data.Disks[i].Index += len(image.SubImages) - 1 + newDataDisks = append(newDataDisks, data.Disks[i]) + } + data.Disks = newDataDisks + return data, nil +} + +func (self *SCloudpodsESXiGuestDriver) ValidateCreateEip(ctx context.Context, userCred mcclient.TokenCredential, input api.ServerCreateEipInput) error { + return httperrors.NewInputParameterError("%s not support create eip", self.GetHypervisor()) +} + +func (self *SCloudpodsESXiGuestDriver) ValidateResizeDisk(guest *models.SGuest, disk *models.SDisk, storage *models.SStorage) error { + if !utils.IsInStringArray(guest.Status, []string{api.VM_READY, api.VM_RUNNING}) { + return fmt.Errorf("Cannot resize disk when guest in status %s", guest.Status) + } + count, err := guest.GetInstanceSnapshotCount() + if err != nil { + return errors.Wrapf(err, "unable to GetInstanceSnapshotCount for guest %q", guest.GetId()) + } + if count > 0 { + return httperrors.NewForbiddenError("can't resize disk 
for guest with instance snapshots") + } + /*if !utils.IsInStringArray(storage.StorageType, []string{models.STORAGE_PUBLIC_CLOUD, models.STORAGE_CLOUD_SSD, models.STORAGE_CLOUD_EFFICIENCY}) { + return fmt.Errorf("Cannot resize %s disk", storage.StorageType) + }*/ + return nil +} + +func (self *SCloudpodsESXiGuestDriver) GetJsonDescAtHost(ctx context.Context, userCred mcclient.TokenCredential, guest *models.SGuest, host *models.SHost, params *jsonutils.JSONDict) (jsonutils.JSONObject, error) { + desc := guest.GetJsonDescAtHypervisor(ctx, host) + // add image_info + if len(desc.Disks) == 0 { + return jsonutils.Marshal(desc), nil + } + for i := range desc.Disks { + diskId := desc.Disks[i].DiskId + disk := models.DiskManager.FetchDiskById(diskId) + if disk == nil { + return nil, fmt.Errorf("unable to fetch disk %s", diskId) + } + storage, err := disk.GetStorage() + if storage == nil { + return nil, errors.Wrapf(err, "unable to fetch storage of disk %s", diskId) + } + desc.Disks[i].StorageId = storage.GetExternalId() + desc.Disks[i].Preallocation = disk.Preallocation + } + templateId := desc.Disks[0].TemplateId + if len(templateId) == 0 { + // try to check instance_snapshot_id + ispId := guest.GetMetadata(ctx, "__base_instance_snapshot_id", userCred) + if len(ispId) == 0 { + return jsonutils.Marshal(desc), nil + } + obj, err := models.InstanceSnapshotManager.FetchById(ispId) + if err != nil { + return nil, errors.Wrapf(err, "unable to fetch InstanceSnapshot %q", ispId) + } + isp := obj.(*models.SInstanceSnapshot) + ispGuest, err := isp.GetGuest() + if err != nil { + return nil, errors.Wrapf(err, "unable to fetch Guest of InstanceSnapshot %q", ispId) + } + desc.InstanceSnapshotInfo.InstanceSnapshotId = isp.GetExternalId() + desc.InstanceSnapshotInfo.InstanceId = ispGuest.GetExternalId() + return jsonutils.Marshal(desc), nil + } + model, err := models.CachedimageManager.FetchById(templateId) + if err != nil { + return jsonutils.Marshal(desc), errors.Wrapf(err, 
"CachedimageManager.FetchById(%s)", templateId) + } + img := model.(*models.SCachedimage) + if cloudprovider.TImageType(img.ImageType) != cloudprovider.ImageTypeSystem { + return jsonutils.Marshal(desc), nil + } + sciSubQ := models.StoragecachedimageManager.Query("storagecache_id").Equals("cachedimage_id", templateId).Equals("status", api.CACHED_IMAGE_STATUS_ACTIVE).SubQuery() + scQ := models.StoragecacheManager.Query().In("id", sciSubQ) + storageCaches := make([]models.SStoragecache, 0, 1) + err = db.FetchModelObjects(models.StoragecacheManager, scQ, &storageCaches) + if err != nil { + return jsonutils.Marshal(desc), errors.Wrapf(err, "fetch storageCache associated with cacheimage %s", templateId) + } + if len(storageCaches) == 0 { + return jsonutils.Marshal(desc), errors.Errorf("no such storage cache associated with cacheimage %s", templateId) + } + if len(storageCaches) > 1 { + log.Warningf("there are multiple storageCache associated with caheimage '%s' ??!!", templateId) + } + + var storageCacheHost *models.SHost + // select storagecacheHost + for i := range storageCaches { + hosts, err := storageCaches[i].GetHosts() + if err != nil { + return jsonutils.Marshal(desc), errors.Wrap(err, "storageCaches.GetHosts") + } + for i := range hosts { + if host.GetId() == hosts[i].GetId() { + storageCacheHost = &hosts[i] + } + } + } + + if storageCacheHost == nil { + storageCacheHost, err = storageCaches[0].GetMasterHost() + if err != nil { + return jsonutils.Marshal(desc), errors.Wrapf(err, "unable to GetHost of storageCache %s", storageCaches[0].Id) + } + if storageCacheHost == nil { + return jsonutils.Marshal(desc), fmt.Errorf("unable to GetHost of storageCache %s: result is nil", storageCaches[0].Id) + } + } + + desc.Disks[0].ImageInfo.ImageType = img.ImageType + desc.Disks[0].ImageInfo.ImageExternalId = img.ExternalId + desc.Disks[0].ImageInfo.StorageCacheHostIp = storageCacheHost.AccessIp + return jsonutils.Marshal(desc), nil +} + +func (self 
*SCloudpodsESXiGuestDriver) RequestDeployGuestOnHost(ctx context.Context, guest *models.SGuest, host *models.SHost, task taskman.ITask) error { + config, err := guest.GetDeployConfigOnHost(ctx, task.GetUserCred(), host, task.GetParams()) + if err != nil { + log.Errorf("GetDeployConfigOnHost error: %v", err) + return err + } + log.Debugf("RequestDeployGuestOnHost: %s", config) + + if !host.IsEsxiAgentReady() { + return fmt.Errorf("No CloudpodsESXi agent host") + } + + diskCat := guest.CategorizeDisks() + if diskCat.Root == nil { + return fmt.Errorf("no root disk???") + } + storage, _ := diskCat.Root.GetStorage() + if storage == nil { + return fmt.Errorf("root disk has no storage???") + } + + config.Add(jsonutils.NewString(host.AccessIp), "host_ip") + config.Add(jsonutils.NewString(guest.Id), "guest_id") + extId := guest.Id + if len(guest.ExternalId) > 0 { + extId = guest.ExternalId + } + config.Add(jsonutils.NewString(extId), "guest_ext_id") + tags, _ := guest.GetAllUserMetadata() + config.Set("tags", jsonutils.Marshal(tags)) + + account := host.GetCloudaccount() + accessInfo, err := account.GetVCenterAccessInfo(storage.ExternalId) + if err != nil { + return err + } + + provider := host.GetCloudprovider() + + action, _ := config.GetString("action") + if action == "create" { + project, err := db.TenantCacheManager.FetchTenantById(ctx, guest.ProjectId) + if err != nil { + return errors.Wrapf(err, "FetchTenantById(%s)", guest.ProjectId) + } + + projects, err := provider.GetExternalProjectsByProjectIdOrName(project.Id, project.Name) + if err != nil { + return errors.Wrapf(err, "GetExternalProjectsByProjectIdOrName(%s,%s)", project.Id, project.Name) + } + + extProj := models.GetAvailableExternalProject(project, projects) + if extProj != nil { + config.Add(jsonutils.NewString(extProj.Name), "desc", "resource_pool") + } else { + config.Add(jsonutils.NewString(project.Name), "desc", "resource_pool") + } + } + + config.Add(jsonutils.Marshal(accessInfo), "datastore") + + url 
:= "/disks/agent/deploy" + + body := jsonutils.NewDict() + body.Add(config, "disk") + + header := task.GetTaskRequestHeader() + + _, err = host.EsxiRequest(ctx, httputils.POST, url, header, body) + return err +} + +func (self *SCloudpodsESXiGuestDriver) RequestSuspendOnHost(ctx context.Context, guest *models.SGuest, task taskman.ITask) error { + taskman.LocalTaskRun(task, func() (jsonutils.JSONObject, error) { + ivm, err := guest.GetIVM(ctx) + if err != nil { + return nil, err + } + vm := ivm.(*esxi.SVirtualMachine) + err = vm.SuspendVM(ctx) + if err != nil { + return nil, errors.Wrap(err, "VM.SuspendVM for vmware") + } + return nil, nil + }) + return nil +} + +func (self *SCloudpodsESXiGuestDriver) RequestResumeOnHost(ctx context.Context, guest *models.SGuest, task taskman.ITask) error { + taskman.LocalTaskRun(task, func() (jsonutils.JSONObject, error) { + ivm, err := guest.GetIVM(ctx) + if err != nil { + return nil, err + } + vm := ivm.(*esxi.SVirtualMachine) + err = vm.ResumeVM(ctx) + if err != nil { + return nil, errors.Wrap(err, "VM.Resume for VMware") + } + return nil, nil + }) + return nil +} + +func (self *SCloudpodsESXiGuestDriver) OnGuestDeployTaskDataReceived(ctx context.Context, guest *models.SGuest, task taskman.ITask, data jsonutils.JSONObject) error { + + if data.Contains("host_ip") { + oldHost, _ := guest.GetHost() + hostIp, _ := data.GetString("host_ip") + host, err := models.HostManager.GetHostByIp(oldHost.ManagerId, api.HOST_TYPE_ESXI, hostIp) + if err != nil { + return err + } + if host.Id != guest.HostId { + models.HostManager.ClearSchedDescCache(host.Id) + models.HostManager.ClearSchedDescCache(guest.HostId) + guest.OnScheduleToHost(ctx, task.GetUserCred(), host.Id) + } + } + + err := self.SManagedVirtualizedGuestDriver.OnGuestDeployTaskDataReceived(ctx, guest, task, data) + if err != nil { + return nil + } + + osInfo := struct { + Arch string + Distro string + Os string + Version string + }{} + data.Unmarshal(&osInfo) + + osinfo := 
map[string]interface{}{} + for k, v := range map[string]string{ + "os_arch": osInfo.Arch, + "os_distribution": osInfo.Distro, + "os_type": osInfo.Os, + "os_name": osInfo.Os, + "os_version": osInfo.Version, + } { + if len(v) > 0 { + osinfo[k] = v + } + } + if len(osinfo) > 0 { + err := guest.SetAllMetadata(ctx, osinfo, task.GetUserCred()) + if err != nil { + return errors.Wrap(err, "SetAllMetadata") + } + } + return nil +} + +func (self *SCloudpodsESXiGuestDriver) AllowReconfigGuest() bool { + return true +} + +func (self *SCloudpodsESXiGuestDriver) RequestSaveImage(ctx context.Context, userCred mcclient.TokenCredential, guest *models.SGuest, task taskman.ITask) error { + disks := guest.CategorizeDisks() + opts := api.DiskSaveInput{} + task.GetParams().Unmarshal(&opts) + return disks.Root.StartDiskSaveTask(ctx, userCred, opts, task.GetTaskId()) +} + +func (self *SCloudpodsESXiGuestDriver) DoGuestCreateDisksTask(ctx context.Context, guest *models.SGuest, task taskman.ITask) error { + subtask, err := taskman.TaskManager.NewTask(ctx, "CloudpodsESXiGuestCreateDiskTask", guest, task.GetUserCred(), task.GetParams(), task.GetTaskId(), "", nil) + if err != nil { + return err + } + subtask.ScheduleRun(nil) + return nil +} + +func (self *SCloudpodsESXiGuestDriver) RequestRenewInstance(ctx context.Context, guest *models.SGuest, bc billing.SBillingCycle) (time.Time, error) { + return time.Time{}, nil +} + +func (self *SCloudpodsESXiGuestDriver) IsSupportEip() bool { + return false +} + +func (self *SCloudpodsESXiGuestDriver) IsSupportCdrom(guest *models.SGuest) (bool, error) { + return false, nil +} + +func (self *SCloudpodsESXiGuestDriver) IsSupportMigrate() bool { + return true +} + +func (self *SCloudpodsESXiGuestDriver) IsSupportLiveMigrate() bool { + return true +} + +func (self *SCloudpodsESXiGuestDriver) CheckMigrate(ctx context.Context, guest *models.SGuest, userCred mcclient.TokenCredential, input api.GuestMigrateInput) error { + return nil +} + +func (self 
*SCloudpodsESXiGuestDriver) CheckLiveMigrate(ctx context.Context, guest *models.SGuest, userCred mcclient.TokenCredential, input api.GuestLiveMigrateInput) error { + return nil +} + +func (self *SCloudpodsESXiGuestDriver) RequestMigrate(ctx context.Context, guest *models.SGuest, userCred mcclient.TokenCredential, input api.GuestMigrateInput, task taskman.ITask) error { + return self.RequestLiveMigrate(ctx, guest, userCred, api.GuestLiveMigrateInput{PreferHostId: input.PreferHostId}, task) +} + +func (self *SCloudpodsESXiGuestDriver) RequestLiveMigrate(ctx context.Context, guest *models.SGuest, userCred mcclient.TokenCredential, input api.GuestLiveMigrateInput, task taskman.ITask) error { + taskman.LocalTaskRun(task, func() (jsonutils.JSONObject, error) { + iVM, err := guest.GetIVM(ctx) + if err != nil { + return nil, errors.Wrap(err, "guest.GetIVM") + } + iHost, err := models.HostManager.FetchById(input.PreferHostId) + if err != nil { + return nil, errors.Wrapf(err, "models.HostManager.FetchById(%s)", input.PreferHostId) + } + host := iHost.(*models.SHost) + hostExternalId := host.ExternalId + if err = iVM.LiveMigrateVM(hostExternalId); err != nil { + return nil, errors.Wrapf(err, "iVM.LiveMigrateVM(%s)", hostExternalId) + } + hostExternalId = iVM.GetIHostId() + if hostExternalId == "" { + return nil, errors.Wrap(fmt.Errorf("empty hostExternalId"), "iVM.GetIHostId()") + } + iHost, err = db.FetchByExternalIdAndManagerId(models.HostManager, hostExternalId, func(q *sqlchemy.SQuery) *sqlchemy.SQuery { + if host, _ := guest.GetHost(); host != nil { + return q.Equals("manager_id", host.ManagerId) + } + return q + }) + if err != nil { + return nil, errors.Wrapf(err, "db.FetchByExternalId(models.HostManager,%s)", hostExternalId) + } + host = iHost.(*models.SHost) + _, err = db.Update(guest, func() error { + guest.HostId = host.GetId() + return nil + }) + if err != nil { + return nil, errors.Wrap(err, "db.Update guest.hostId") + } + disks, err := guest.GetDisks() + if err 
!= nil { + return nil, errors.Wrapf(err, "GetDisks") + } + iRegion, err := host.GetIRegion(ctx) + if err != nil { + return nil, errors.Wrapf(err, "GetIRegion") + } + for i := range disks { + iDisk, err := iRegion.GetIDiskById(disks[i].ExternalId) + if err != nil { + return nil, errors.Wrapf(err, "GetIDisk(%s)", disks[i].ExternalId) + } + iStorage, err := db.FetchByExternalIdAndManagerId(models.StorageManager, iDisk.GetIStorageId(), func(q *sqlchemy.SQuery) *sqlchemy.SQuery { + hcs := models.HoststorageManager.Query().SubQuery() + return q.Join(hcs, sqlchemy.Equals(hcs.Field("storage_id"), q.Field("id"))).Filter(sqlchemy.Equals(hcs.Field("host_id"), host.GetId())) + }) + if err != nil { + return nil, errors.Wrapf(err, "FetchStorageByExternalId(%s)", iDisk.GetIStorageId()) + } + storage := iStorage.(*models.SStorage) + _, err = db.Update(&disks[i], func() error { + disks[i].StorageId = storage.Id + return nil + }) + if err != nil { + return nil, errors.Wrapf(err, "db.Update disk %s storageid", disks[i].Name) + } + } + return nil, nil + }) + return nil +} + +func (self *SCloudpodsESXiGuestDriver) RequestStartOnHost(ctx context.Context, guest *models.SGuest, host *models.SHost, userCred mcclient.TokenCredential, task taskman.ITask) error { + ivm, err := guest.GetIVM(ctx) + if err != nil { + return errors.Wrapf(err, "GetIVM") + } + + result := jsonutils.NewDict() + if ivm.GetStatus() != api.VM_RUNNING { + err := ivm.StartVM(ctx) + if err != nil { + return errors.Wrapf(err, "StartVM") + } + err = cloudprovider.WaitStatus(ivm, api.VM_RUNNING, time.Second*5, time.Minute*10) + if err != nil { + return errors.Wrapf(err, "Wait vm running") + } + guest.SetStatus(ctx, userCred, api.VM_RUNNING, "StartOnHost") + return task.ScheduleRun(result) + } + return guest.SetStatus(ctx, userCred, api.VM_RUNNING, "StartOnHost") +} + +func (self *SCloudpodsESXiGuestDriver) RequestStopOnHost(ctx context.Context, guest *models.SGuest, host *models.SHost, task taskman.ITask, syncStatus bool) 
error { + taskman.LocalTaskRun(task, func() (jsonutils.JSONObject, error) { + ivm, err := guest.GetIVM(ctx) + if err != nil { + return nil, errors.Wrapf(err, "guest.GetIVM") + } + opts := &cloudprovider.ServerStopOptions{} + task.GetParams().Unmarshal(opts) + err = ivm.StopVM(ctx, opts) + if err != nil { + return nil, errors.Wrapf(err, "ivm.StopVM") + } + err = cloudprovider.WaitStatus(ivm, api.VM_READY, time.Second*3, time.Minute*5) + if err != nil { + return nil, errors.Wrapf(err, "wait server stop after 5 minutes") + } + return nil, nil + }) + return nil +} + +func (self *SCloudpodsESXiGuestDriver) RequestSyncstatusOnHost(ctx context.Context, guest *models.SGuest, host *models.SHost, userCred mcclient.TokenCredential, task taskman.ITask) error { + taskman.LocalTaskRun(task, func() (jsonutils.JSONObject, error) { + ihost, err := host.GetIHost(ctx) + if err != nil { + return nil, errors.Wrap(err, "host.GetIHost") + } + ivm, err := ihost.GetIVMById(guest.GetExternalId()) + if err != nil { + if errors.Cause(err) != errors.ErrNotFound { + return nil, errors.Wrap(err, "ihost.GetIVMById") + } + // VM may be migrated by Vcenter, try to find VM from whole datacenter.
+ ehost := ihost.(*esxi.SHost) + dc, err := ehost.GetDatacenter() + if err != nil { + return nil, errors.Wrapf(err, "ehost.GetDatacenter") + } + vm, err := dc.FetchVMById(guest.GetExternalId()) + if err != nil { + log.Errorf("fail to find ivm by id %q in dc %q: %v", guest.GetExternalId(), dc.GetName(), err) + return nil, errors.Wrap(err, "dc.FetchVMById") + } + ihost = vm.GetIHost() + host = models.HostManager.FetchHostByExtId(ihost.GetGlobalId()) + if host == nil { + return nil, errors.Wrapf(errors.ErrNotFound, "find ivm %q in ihost %q which is not existed here", guest.GetExternalId(), ihost.GetGlobalId()) + } + ivm = vm + } + err = guest.SyncAllWithCloudVM(ctx, userCred, host, ivm, true) + if err != nil { + return nil, errors.Wrap(err, "guest.SyncAllWithCloudVM") + } + + status := GetCloudVMStatus(ivm) + body := jsonutils.NewDict() + body.Add(jsonutils.NewString(status), "status") + return body, nil + }) + return nil +} + +func (self *SCloudpodsESXiGuestDriver) ValidateRebuildRoot(ctx context.Context, userCred mcclient.TokenCredential, guest *models.SGuest, input *api.ServerRebuildRootInput) (*api.ServerRebuildRootInput, error) { + // check snapshot + count, err := guest.GetInstanceSnapshotCount() + if err != nil { + return nil, errors.Wrapf(err, "unable to GetInstanceSnapshotCount for guest %q", guest.GetId()) + } + if count > 0 { + return input, httperrors.NewForbiddenError("can't rebuild root for a guest with instance snapshots") + } + return input, nil +} + +func (self *SCloudpodsESXiGuestDriver) StartDeleteGuestTask(ctx context.Context, userCred mcclient.TokenCredential, guest *models.SGuest, params *jsonutils.JSONDict, parentTaskId string) error { + params.Add(jsonutils.JSONTrue, "delete_snapshots") + return self.SBaseGuestDriver.StartDeleteGuestTask(ctx, userCred, guest, params, parentTaskId) +} + +func (self *SCloudpodsESXiGuestDriver) SyncOsInfo(ctx context.Context, userCred mcclient.TokenCredential, g *models.SGuest, extVM cloudprovider.IOSInfo) error { 
+ ometa, err := g.GetAllMetadata(ctx, userCred) + if err != nil { + return errors.Wrap(err, "GetAllMetadata") + } + // save os info + osinfo := map[string]interface{}{} + for k, v := range map[string]string{ + "os_full_name": extVM.GetFullOsName(), + "os_name": string(extVM.GetOsType()), + "os_arch": extVM.GetOsArch(), + "os_type": string(extVM.GetOsType()), + "os_distribution": extVM.GetOsDist(), + "os_version": extVM.GetOsVersion(), + "os_language": extVM.GetOsLang(), + } { + if len(v) == 0 || len(ometa[k]) > 0 { + continue + } + osinfo[k] = v + } + if len(osinfo) > 0 { + err := g.SetAllMetadata(ctx, osinfo, userCred) + if err != nil { + return errors.Wrap(err, "SetAllMetadata") + } + } + return nil +} + +func (drv *SCloudpodsESXiGuestDriver) RequestUndeployGuestOnHost(ctx context.Context, guest *models.SGuest, host *models.SHost, task taskman.ITask) error { + taskman.LocalTaskRun(task, func() (jsonutils.JSONObject, error) { + iVm, err := guest.GetIVM(ctx) + if err != nil { + if errors.Cause(err) == cloudprovider.ErrNotFound { + return nil, nil + } + return nil, errors.Wrapf(err, "GetIVM") + } + + err = iVm.DeleteVM(ctx) + if err != nil { + return nil, errors.Wrapf(err, "DeleteVM") + } + + return nil, cloudprovider.WaitDeleted(iVm, time.Second*10, time.Minute*3) + }) + return nil +} + +func (drv *SCloudpodsESXiGuestDriver) ValidateGuestHotChangeConfigInput(ctx context.Context, guest *models.SGuest, confs *api.ServerChangeConfigSettings) (*api.ServerChangeConfigSettings, error) { + // cannot change esxi VM CPU cores per sockets + corePerSocket := guest.VcpuCount / guest.CpuSockets + if confs.VcpuCount%corePerSocket != 0 { + return confs, errors.Wrapf(httperrors.ErrInputParameter, "cpu count %d should be times of %d", confs.VcpuCount, corePerSocket) + } + confs.CpuSockets = confs.VcpuCount / corePerSocket + + // https://kb.vmware.com/s/article/2008405 + // cannot increase memory beyond 3G if the initial CPU memory is lower than 3G + startVmem := guest.VmemSize + 
vmemMbStr := guest.GetMetadata(ctx, api.VM_METADATA_START_VMEM_MB, nil) + if len(vmemMbStr) > 0 { + vmemMb, _ := strconv.Atoi(vmemMbStr) + if vmemMb > 0 { + startVmem = int(vmemMb) + } + } + maxAllowVmem := 16 * startVmem + if startVmem <= 3*1024 { + maxAllowVmem = 3 * 1024 + } + if confs.VmemSize > maxAllowVmem { + return confs, errors.Wrapf(httperrors.ErrInputParameter, "memory cannot be resized beyond %dMB", maxAllowVmem) + } + return confs, nil +} + +func (esxi *SCloudpodsESXiGuestDriver) ValidateGuestChangeConfigInput(ctx context.Context, guest *models.SGuest, input api.ServerChangeConfigInput) (*api.ServerChangeConfigSettings, error) { + confs, err := esxi.SBaseGuestDriver.ValidateGuestChangeConfigInput(ctx, guest, input) + if err != nil { + return nil, errors.Wrap(err, "SBaseGuestDriver.ValidateGuestChangeConfigInput") + } + + if input.CpuSockets != nil && *input.CpuSockets > 0 { + confs.CpuSockets = *input.CpuSockets + } + + defaultStorageId := "" + if root, _ := guest.GetSystemDisk(); root != nil { + defaultStorageId = root.StorageId + } + storages, err := guest.GetStorages() + if err != nil { + return nil, errors.Wrapf(err, "GetStorages") + } + storageMap := map[string]string{} + for _, storage := range storages { + storageMap[storage.StorageType] = storage.Id + if len(defaultStorageId) == 0 { + defaultStorageId = storage.Id + } + } + for i := range confs.Create { + confs.Create[i].Format = "vmdk" + if len(confs.Create[i].Storage) == 0 { + // 若不指定存储类型,默认和系统盘一致 + if len(confs.Create[i].Backend) == 0 { + confs.Create[i].Storage = defaultStorageId + } else if storageId, ok := storageMap[confs.Create[i].Backend]; ok { // 否则和已有磁盘存储保持一致 + confs.Create[i].Storage = storageId + } + } + } + return confs, nil +} diff --git a/pkg/compute/guestdrivers/cloudpods.go b/pkg/compute/guestdrivers/cloudpods-kvm.go similarity index 98% rename from pkg/compute/guestdrivers/cloudpods.go rename to pkg/compute/guestdrivers/cloudpods-kvm.go index 99a1a625f27..fe74b0b4513 100644 
--- a/pkg/compute/guestdrivers/cloudpods.go +++ b/pkg/compute/guestdrivers/cloudpods-kvm.go @@ -49,7 +49,7 @@ func (self *SCloudpodsGuestDriver) DoScheduleSKUFilter() bool { return false } func (self *SCloudpodsGuestDriver) DoScheduleStorageFilter() bool { return true } func (self *SCloudpodsGuestDriver) GetHypervisor() string { - return api.HYPERVISOR_CLOUDPODS + return api.HYPERVISOR_DEFAULT } func (self *SCloudpodsGuestDriver) GetProvider() string { @@ -124,7 +124,7 @@ func (self *SCloudpodsGuestDriver) GetComputeQuotaKeys(scope rbacscope.TRbacScop keys.CloudEnv = api.CLOUD_ENV_PRIVATE_CLOUD keys.Provider = api.CLOUD_PROVIDER_CLOUDPODS keys.Brand = brand - keys.Hypervisor = api.HYPERVISOR_CLOUDPODS + keys.Hypervisor = api.HYPERVISOR_DEFAULT return keys } diff --git a/pkg/compute/guestdrivers/h3c.go b/pkg/compute/guestdrivers/h3c.go index d9d45f0296c..af48dd97cfc 100644 --- a/pkg/compute/guestdrivers/h3c.go +++ b/pkg/compute/guestdrivers/h3c.go @@ -78,7 +78,7 @@ func (self *SH3CGuestDriver) GetMinimalSysDiskSizeGb() int { } func (self *SH3CGuestDriver) GetStorageTypes() []string { - storages, _ := models.StorageManager.GetStorageTypesByHostType(api.HYPERVISOR_HOSTTYPE[self.GetHypervisor()]) + storages, _ := models.StorageManager.GetStorageTypesByProvider(self.GetProvider()) return storages } diff --git a/pkg/compute/guestdrivers/hcs.go b/pkg/compute/guestdrivers/hcs.go index 76588da4f43..03c2cad4a16 100644 --- a/pkg/compute/guestdrivers/hcs.go +++ b/pkg/compute/guestdrivers/hcs.go @@ -78,7 +78,7 @@ func (self *SHCSGuestDriver) GetMinimalSysDiskSizeGb() int { } func (self *SHCSGuestDriver) GetStorageTypes() []string { - storages, _ := models.StorageManager.GetStorageTypesByHostType(api.HYPERVISOR_HOSTTYPE[self.GetHypervisor()]) + storages, _ := models.StorageManager.GetStorageTypesByProvider(self.GetProvider()) return storages } diff --git a/pkg/compute/guestdrivers/hcsop.go b/pkg/compute/guestdrivers/hcsop.go index a4eb2795e11..f9d07885c37 100644 --- 
a/pkg/compute/guestdrivers/hcsop.go +++ b/pkg/compute/guestdrivers/hcsop.go @@ -78,7 +78,7 @@ func (self *SHCSOPGuestDriver) GetMinimalSysDiskSizeGb() int { } func (self *SHCSOPGuestDriver) GetStorageTypes() []string { - storages, _ := models.StorageManager.GetStorageTypesByHostType(api.HYPERVISOR_HOSTTYPE[self.GetHypervisor()]) + storages, _ := models.StorageManager.GetStorageTypesByProvider(self.GetProvider()) return storages } diff --git a/pkg/compute/guestdrivers/kvm.go b/pkg/compute/guestdrivers/kvm.go index d43a2d81662..caba863fbe7 100644 --- a/pkg/compute/guestdrivers/kvm.go +++ b/pkg/compute/guestdrivers/kvm.go @@ -314,7 +314,11 @@ func (self *SKVMGuestDriver) RequestStartOnHost(ctx context.Context, guest *mode header := self.getTaskRequestHeader(task) config := jsonutils.NewDict() - desc, err := guest.GetDriver().GetJsonDescAtHost(ctx, userCred, guest, host, nil) + drv, err := guest.GetDriver() + if err != nil { + return err + } + desc, err := drv.GetJsonDescAtHost(ctx, userCred, guest, host, nil) if err != nil { return errors.Wrapf(err, "GetJsonDescAtHost") } @@ -618,7 +622,11 @@ func (self *SKVMGuestDriver) ValidateResizeDisk(guest *models.SGuest, disk *mode } func (self *SKVMGuestDriver) RequestSyncConfigOnHost(ctx context.Context, guest *models.SGuest, host *models.SHost, task taskman.ITask) error { - desc, err := guest.GetDriver().GetJsonDescAtHost(ctx, task.GetUserCred(), guest, host, nil) + drv, err := guest.GetDriver() + if err != nil { + return err + } + desc, err := drv.GetJsonDescAtHost(ctx, task.GetUserCred(), guest, host, nil) if err != nil { return errors.Wrapf(err, "GetJsonDescAtHost") } @@ -689,8 +697,15 @@ func (self *SKVMGuestDriver) RequestRebuildRootDisk(ctx context.Context, guest * } func (self *SKVMGuestDriver) RequestSyncToBackup(ctx context.Context, guest *models.SGuest, task taskman.ITask) error { - host, _ := guest.GetHost() - desc, err := guest.GetDriver().GetJsonDescAtHost(ctx, task.GetUserCred(), guest, host, nil) + host, err 
:= guest.GetHost() + if err != nil { + return err + } + drv, err := guest.GetDriver() + if err != nil { + return err + } + desc, err := drv.GetJsonDescAtHost(ctx, task.GetUserCred(), guest, host, nil) if err != nil { return errors.Wrapf(err, "GetJsonDescAtHost") } diff --git a/pkg/compute/guestdrivers/managedvirtual.go b/pkg/compute/guestdrivers/managedvirtual.go index ba497dd4e22..f68ebe6b229 100644 --- a/pkg/compute/guestdrivers/managedvirtual.go +++ b/pkg/compute/guestdrivers/managedvirtual.go @@ -62,11 +62,15 @@ func (d SManagedVirtualizedGuestDriver) DoScheduleStorageFilter() bool { return func (d SManagedVirtualizedGuestDriver) DoScheduleCloudproviderTagFilter() bool { return true } func (drv *SManagedVirtualizedGuestDriver) GetJsonDescAtHost(ctx context.Context, userCred mcclient.TokenCredential, guest *models.SGuest, host *models.SHost, params *jsonutils.JSONDict) (jsonutils.JSONObject, error) { + driver, err := guest.GetDriver() + if err != nil { + return nil, err + } config := cloudprovider.SManagedVMCreateConfig{ - IsNeedInjectPasswordByCloudInit: guest.GetDriver().IsNeedInjectPasswordByCloudInit(), - UserDataType: guest.GetDriver().GetUserDataType(), - WindowsUserDataType: guest.GetDriver().GetWindowsUserDataType(), - IsWindowsUserDataTypeNeedEncode: guest.GetDriver().IsWindowsUserDataTypeNeedEncode(), + IsNeedInjectPasswordByCloudInit: driver.IsNeedInjectPasswordByCloudInit(), + UserDataType: driver.GetUserDataType(), + WindowsUserDataType: driver.GetWindowsUserDataType(), + IsWindowsUserDataTypeNeedEncode: driver.IsWindowsUserDataTypeNeedEncode(), } config.Name = guest.Name config.NameEn = pinyinutils.Text2Pinyin(guest.Name) @@ -100,7 +104,6 @@ func (drv *SManagedVirtualizedGuestDriver) GetJsonDescAtHost(ctx context.Context config.IpAddr = nics[0].IpAddr } - var err error provider := host.GetCloudprovider() config.ProjectId, err = provider.SyncProject(ctx, userCred, guest.ProjectId) if err != nil { @@ -500,7 +503,11 @@ func (drv 
*SManagedVirtualizedGuestDriver) RequestDeployGuestOnHost(ctx context. ImageType string }{} config.Unmarshal(&osInfo, "desc") - desc.Account = guest.GetDriver().GetDefaultAccount(osInfo.OsType, osInfo.OsDistribution, osInfo.ImageType) + driver, err := guest.GetDriver() + if err != nil { + return err + } + desc.Account = driver.GetDefaultAccount(osInfo.OsType, osInfo.OsDistribution, osInfo.ImageType) err = desc.GetConfig(config) if err != nil { return errors.Wrapf(err, "desc.GetConfig") @@ -523,10 +530,14 @@ func (drv *SManagedVirtualizedGuestDriver) RequestDeployGuestOnHost(ctx context. return err } + region, err := host.GetRegion() + if err != nil { + return errors.Wrapf(err, "GetRegion") + } + switch action { case "create": - region, _ := host.GetRegion() - if len(desc.InstanceType) == 0 && region != nil && utils.IsInStringArray(guest.Hypervisor, api.PUBLIC_CLOUD_HYPERVISORS) { + if len(desc.InstanceType) == 0 && region != nil && utils.IsInStringArray(region.Provider, api.PUBLIC_CLOUD_PROVIDERS) { sku, err := models.ServerSkuManager.GetMatchedSku(region.GetId(), int64(desc.Cpu), int64(desc.MemoryMB)) if err != nil { return errors.Wrap(err, "ManagedVirtualizedGuestDriver.RequestDeployGuestOnHost.GetMatchedSku") @@ -540,15 +551,15 @@ func (drv *SManagedVirtualizedGuestDriver) RequestDeployGuestOnHost(ctx context. 
} taskman.LocalTaskRun(task, func() (jsonutils.JSONObject, error) { - return guest.GetDriver().RemoteDeployGuestForCreate(ctx, task.GetUserCred(), guest, host, desc) + return driver.RemoteDeployGuestForCreate(ctx, task.GetUserCred(), guest, host, desc) }) case "deploy": taskman.LocalTaskRun(task, func() (jsonutils.JSONObject, error) { - return guest.GetDriver().RemoteDeployGuestForDeploy(ctx, guest, ihost, task, desc) + return driver.RemoteDeployGuestForDeploy(ctx, guest, ihost, task, desc) }) case "rebuild": taskman.LocalTaskRun(task, func() (jsonutils.JSONObject, error) { - return guest.GetDriver().RemoteDeployGuestForRebuildRoot(ctx, guest, ihost, task, desc) + return driver.RemoteDeployGuestForRebuildRoot(ctx, guest, ihost, task, desc) }) default: log.Errorf("RequestDeployGuestOnHost: Action %s not supported", action) @@ -629,14 +640,18 @@ func (drv *SManagedVirtualizedGuestDriver) RemoteDeployGuestForCreate(ctx contex if err != nil { return nil, err } + driver, err := guest.GetDriver() + if err != nil { + return nil, err + } // iVM 实际所在的ihost 可能和 调度选择的host不是同一个,此处根据iVM实际所在host,重新同步 - ihost, err = guest.GetDriver().RemoteDeployGuestSyncHost(ctx, userCred, guest, host, iVM) + ihost, err = driver.RemoteDeployGuestSyncHost(ctx, userCred, guest, host, iVM) if err != nil { return nil, errors.Wrap(err, "RemoteDeployGuestSyncHost") } vmId := iVM.GetGlobalId() - initialState := guest.GetDriver().GetGuestInitialStateAfterCreate() + initialState := driver.GetGuestInitialStateAfterCreate() log.Debugf("VMcreated %s, wait status %s ...", vmId, initialState) err = cloudprovider.WaitStatusWithInstanceErrorCheck(iVM, initialState, time.Second*5, time.Second*1800, func() error { return iVM.GetError() @@ -651,7 +666,7 @@ func (drv *SManagedVirtualizedGuestDriver) RemoteDeployGuestForCreate(ctx contex return nil, errors.Wrapf(err, "GetIVMById(%s)", vmId) } - if guest.GetDriver().GetMaxSecurityGroupCount() > 0 { + if driver.GetMaxSecurityGroupCount() > 0 { err = 
iVM.SetSecurityGroups(desc.ExternalSecgroupIds) if err != nil { return nil, errors.Wrapf(err, "SetSecurityGroups") @@ -708,7 +723,7 @@ func (drv *SManagedVirtualizedGuestDriver) RemoteDeployGuestForCreate(ctx contex } } - guest.GetDriver().RemoteActionAfterGuestCreated(ctx, userCred, guest, host, iVM, &desc) + driver.RemoteActionAfterGuestCreated(ctx, userCred, guest, host, iVM, &desc) data := fetchIVMinfo(desc, iVM, guest.Id, desc.Account, desc.Password, desc.PublicKey, "create") return data, nil @@ -821,7 +836,12 @@ func (drv *SManagedVirtualizedGuestDriver) RemoteDeployGuestForRebuildRoot(ctx c return nil, err } - initialState := guest.GetDriver().GetGuestInitialStateAfterRebuild() + driver, err := guest.GetDriver() + if err != nil { + return nil, err + } + + initialState := driver.GetGuestInitialStateAfterRebuild() log.Debugf("VMrebuildRoot %s new diskID %s, wait status %s ...", iVM.GetGlobalId(), diskId, initialState) err = cloudprovider.WaitStatus(iVM, initialState, time.Second*5, time.Second*1800) if err != nil { @@ -1192,7 +1212,9 @@ func (drv *SManagedVirtualizedGuestDriver) OnGuestDeployTaskDataReceived(ctx con if err == nil && !guest.IsPrepaidRecycle() { guest.SaveRenewInfo(ctx, task.GetUserCred(), nil, &exp, "") } - if guest.GetDriver().IsSupportSetAutoRenew() { + + driver, _ := guest.GetDriver() + if driver != nil && driver.IsSupportSetAutoRenew() { autoRenew, _ := data.Bool("auto_renew") guest.SetAutoRenew(autoRenew) } @@ -1231,7 +1253,11 @@ func (drv *SManagedVirtualizedGuestDriver) RequestSyncConfigOnHost(ctx context.C taskman.LocalTaskRun(task, func() (jsonutils.JSONObject, error) { if jsonutils.QueryBoolean(task.GetParams(), "fw_only", false) { - err := guest.GetDriver().RequestSyncSecgroupsOnHost(ctx, guest, host, task) + driver, err := guest.GetDriver() + if err != nil { + return nil, err + } + err = driver.RequestSyncSecgroupsOnHost(ctx, guest, host, task) if err != nil { return nil, err } diff --git a/pkg/compute/guestdrivers/openstack.go 
b/pkg/compute/guestdrivers/openstack.go index b270e28a2e3..6db7eb361e7 100644 --- a/pkg/compute/guestdrivers/openstack.go +++ b/pkg/compute/guestdrivers/openstack.go @@ -113,7 +113,7 @@ func (self *SOpenStackGuestDriver) GetMinimalSysDiskSizeGb() int { } func (self *SOpenStackGuestDriver) GetStorageTypes() []string { - storages, _ := models.StorageManager.GetStorageTypesByHostType(api.HYPERVISOR_HOSTTYPE[self.GetHypervisor()]) + storages, _ := models.StorageManager.GetStorageTypesByProvider(self.GetProvider()) return storages } @@ -218,6 +218,11 @@ func (self *SOpenStackGuestDriver) RemoteDeployGuestForRebuildRoot(ctx context.C return nil, fmt.Errorf("cannot find vm %s(%s)", guest.Id, guest.Name) } + driver, err := guest.GetDriver() + if err != nil { + return nil, err + } + instanceId := iVM.GetGlobalId() diskId, err := func() (string, error) { @@ -290,7 +295,7 @@ func (self *SOpenStackGuestDriver) RemoteDeployGuestForRebuildRoot(ctx context.C instanceId = iVM.GetGlobalId() db.SetExternalId(guest, task.GetUserCred(), instanceId) - initialState := guest.GetDriver().GetGuestInitialStateAfterCreate() + initialState := driver.GetGuestInitialStateAfterCreate() log.Debugf("VMrebuildRoot %s new instance, wait status %s ...", iVM.GetGlobalId(), initialState) cloudprovider.WaitStatus(iVM, initialState, time.Second*5, time.Second*1800) @@ -324,7 +329,7 @@ func (self *SOpenStackGuestDriver) RemoteDeployGuestForRebuildRoot(ctx context.C return nil, err } - initialState := guest.GetDriver().GetGuestInitialStateAfterRebuild() + initialState := driver.GetGuestInitialStateAfterRebuild() log.Debugf("VMrebuildRoot %s new diskID %s, wait status %s ...", iVM.GetGlobalId(), diskId, initialState) err = cloudprovider.WaitStatus(iVM, initialState, time.Second*5, time.Second*1800) if err != nil { diff --git a/pkg/compute/guestdrivers/pod.go b/pkg/compute/guestdrivers/pod.go index bf0f2d77e31..af548c4e2a3 100644 --- a/pkg/compute/guestdrivers/pod.go +++ b/pkg/compute/guestdrivers/pod.go @@ 
-251,7 +251,11 @@ func (p *SPodDriver) RequestStartOnHost(ctx context.Context, guest *models.SGues header := p.getTaskRequestHeader(task) config := jsonutils.NewDict() - desc, err := guest.GetDriver().GetJsonDescAtHost(ctx, task.GetUserCred(), guest, host, nil) + drv, err := guest.GetDriver() + if err != nil { + return err + } + desc, err := drv.GetJsonDescAtHost(ctx, task.GetUserCred(), guest, host, nil) if err != nil { return errors.Wrapf(err, "GetJsonDescAtHost") } diff --git a/pkg/compute/guestdrivers/proxmox.go b/pkg/compute/guestdrivers/proxmox.go index ad10bf56574..fabdaff6e6e 100644 --- a/pkg/compute/guestdrivers/proxmox.go +++ b/pkg/compute/guestdrivers/proxmox.go @@ -153,8 +153,7 @@ func (self *SProxmoxGuestDriver) ValidateCreateEip(ctx context.Context, userCred } func (self *SProxmoxGuestDriver) ValidateCreateData(ctx context.Context, userCred mcclient.TokenCredential, input *api.ServerCreateInput) (*api.ServerCreateInput, error) { - driver := models.GetDriver(input.Hypervisor) - if len(input.UserData) > 0 && driver != nil && driver.IsNeedInjectPasswordByCloudInit() { + if len(input.UserData) > 0 { _, err := cloudinit.ParseUserData(input.UserData) if err != nil { return nil, err diff --git a/pkg/compute/guestdrivers/virtualization.go b/pkg/compute/guestdrivers/virtualization.go index 1fd3bd6dfa5..3a02413b853 100644 --- a/pkg/compute/guestdrivers/virtualization.go +++ b/pkg/compute/guestdrivers/virtualization.go @@ -100,7 +100,11 @@ func (self *SVirtualizedGuestDriver) Attach2RandomNetwork(guest *models.SGuest, } hostNetifs := host.GetHostNetInterfaces() netsAvaiable := make([]models.SNetwork, 0) - netTypes := guest.GetDriver().GetRandomNetworkTypes() + driver, err := guest.GetDriver() + if err != nil { + return nil, err + } + netTypes := driver.GetRandomNetworkTypes() if len(netConfig.NetType) > 0 { netTypes = []string{netConfig.NetType} } diff --git a/pkg/compute/hostdrivers/aliyun.go b/pkg/compute/hostdrivers/aliyun.go index 6e7910387a9..a8636237f4d 
100644 --- a/pkg/compute/hostdrivers/aliyun.go +++ b/pkg/compute/hostdrivers/aliyun.go @@ -43,6 +43,10 @@ func (self *SAliyunHostDriver) GetHypervisor() string { return api.HYPERVISOR_ALIYUN } +func (self *SAliyunHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_ALIYUN +} + func (self *SAliyunHostDriver) ValidateDiskSize(storage *models.SStorage, sizeGb int) error { minGB := -1 maxGB := -1 diff --git a/pkg/compute/hostdrivers/apsara.go b/pkg/compute/hostdrivers/apsara.go index 22930bfc026..1735f4af867 100644 --- a/pkg/compute/hostdrivers/apsara.go +++ b/pkg/compute/hostdrivers/apsara.go @@ -35,3 +35,7 @@ func (self *SApsaraHostDriver) GetHostType() string { func (self *SApsaraHostDriver) GetHypervisor() string { return api.HYPERVISOR_APSARA } + +func (self *SApsaraHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_APSARA +} diff --git a/pkg/compute/hostdrivers/aws.go b/pkg/compute/hostdrivers/aws.go index a1be937dec9..1d9590da20c 100644 --- a/pkg/compute/hostdrivers/aws.go +++ b/pkg/compute/hostdrivers/aws.go @@ -43,6 +43,10 @@ func (self *SAwsHostDriver) GetHypervisor() string { return api.HYPERVISOR_AWS } +func (self *SAwsHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_AWS +} + func (self *SAwsHostDriver) ValidateDiskSize(storage *models.SStorage, sizeGb int) error { if storage.StorageType == api.STORAGE_GP2_SSD || storage.StorageType == api.STORAGE_GP3_SSD { if sizeGb < 1 || sizeGb > 16384 { diff --git a/pkg/compute/hostdrivers/azure.go b/pkg/compute/hostdrivers/azure.go index 30f3c5ed680..d1363a757ff 100644 --- a/pkg/compute/hostdrivers/azure.go +++ b/pkg/compute/hostdrivers/azure.go @@ -44,7 +44,11 @@ func (self *SAzureHostDriver) GetHypervisor() string { return api.HYPERVISOR_AZURE } -func (self *SAzureHostDriver) ValidateUpdateDisk(ctx context.Context, userCred mcclient.TokenCredential, input api.DiskUpdateInput) (api.DiskUpdateInput, error) { +func (self *SAzureHostDriver) GetProvider() string { + return 
api.CLOUD_PROVIDER_AZURE +} + +func (self *SAzureHostDriver) ValidateUpdateDisk(ctx context.Context, userCred mcclient.TokenCredential, input *api.DiskUpdateInput) (*api.DiskUpdateInput, error) { if len(input.Name) > 0 { return input, httperrors.NewInputParameterError("cannot support change azure disk name") } diff --git a/pkg/compute/hostdrivers/baidu.go b/pkg/compute/hostdrivers/baidu.go index dcf3ca6db2f..97fd695385e 100644 --- a/pkg/compute/hostdrivers/baidu.go +++ b/pkg/compute/hostdrivers/baidu.go @@ -35,3 +35,7 @@ func (self *SBaiduHostDriver) GetHostType() string { func (self *SBaiduHostDriver) GetHypervisor() string { return api.HYPERVISOR_BAIDU } + +func (self *SBaiduHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_BAIDU +} diff --git a/pkg/compute/hostdrivers/baremetal.go b/pkg/compute/hostdrivers/baremetal.go index fbbd9f60d60..71a401e5d2e 100644 --- a/pkg/compute/hostdrivers/baremetal.go +++ b/pkg/compute/hostdrivers/baremetal.go @@ -44,6 +44,38 @@ func (self *SBaremetalHostDriver) GetHypervisor() string { return api.HYPERVISOR_BAREMETAL } +func (self *SBaremetalHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_ONECLOUD +} + +func (self *SBaremetalHostDriver) RequestBaremetalUnmaintence(ctx context.Context, userCred mcclient.TokenCredential, baremetal *models.SHost, task taskman.ITask) error { + url := fmt.Sprintf("/baremetals/%s/unmaintenance", baremetal.Id) + headers := task.GetTaskRequestHeader() + _, err := baremetal.BaremetalSyncRequest(ctx, "POST", url, headers, task.GetParams()) + return err +} + +func (self *SBaremetalHostDriver) RequestBaremetalMaintence(ctx context.Context, userCred mcclient.TokenCredential, baremetal *models.SHost, task taskman.ITask) error { + url := fmt.Sprintf("/baremetals/%s/maintenance", baremetal.Id) + headers := task.GetTaskRequestHeader() + _, err := baremetal.BaremetalSyncRequest(ctx, "POST", url, headers, task.GetParams()) + return err +} + +func (self *SBaremetalHostDriver) 
RequestSyncBaremetalHostStatus(ctx context.Context, userCred mcclient.TokenCredential, baremetal *models.SHost, task taskman.ITask) error { + url := fmt.Sprintf("/baremetals/%s/syncstatus", baremetal.Id) + headers := task.GetTaskRequestHeader() + _, err := baremetal.BaremetalSyncRequest(ctx, "POST", url, headers, nil) + return err +} + +func (self *SBaremetalHostDriver) RequestSyncBaremetalHostConfig(ctx context.Context, userCred mcclient.TokenCredential, baremetal *models.SHost, task taskman.ITask) error { + url := fmt.Sprintf("/baremetals/%s/sync-config", baremetal.Id) + headers := task.GetTaskRequestHeader() + _, err := baremetal.BaremetalSyncRequest(ctx, "POST", url, headers, nil) + return err +} + func (self *SBaremetalHostDriver) IsDisableImageCache(host *models.SHost) (bool, error) { agent := host.GetAgent(api.AgentTypeBaremetal) if agent == nil { diff --git a/pkg/compute/hostdrivers/base.go b/pkg/compute/hostdrivers/base.go index 70d2b95dbb5..3e5cd874171 100644 --- a/pkg/compute/hostdrivers/base.go +++ b/pkg/compute/hostdrivers/base.go @@ -18,6 +18,7 @@ import ( "context" "fmt" + "yunion.io/x/cloudmux/pkg/cloudprovider" "yunion.io/x/jsonutils" "yunion.io/x/log" "yunion.io/x/pkg/errors" @@ -35,7 +36,23 @@ import ( type SBaseHostDriver struct { } -func (self *SBaseHostDriver) ValidateUpdateDisk(ctx context.Context, userCred mcclient.TokenCredential, input api.DiskUpdateInput) (api.DiskUpdateInput, error) { +func (self *SBaseHostDriver) RequestBaremetalUnmaintence(ctx context.Context, userCred mcclient.TokenCredential, baremetal *models.SHost, task taskman.ITask) error { + return errors.Wrapf(cloudprovider.ErrNotSupported, "RequestBaremetalUnmaintence") +} + +func (self *SBaseHostDriver) RequestBaremetalMaintence(ctx context.Context, userCred mcclient.TokenCredential, baremetal *models.SHost, task taskman.ITask) error { + return errors.Wrapf(cloudprovider.ErrNotSupported, "RequestBaremetalMaintence") +} + +func (self *SBaseHostDriver) 
RequestSyncBaremetalHostStatus(ctx context.Context, userCred mcclient.TokenCredential, baremetal *models.SHost, task taskman.ITask) error { + return errors.Wrapf(cloudprovider.ErrNotSupported, "RequestSyncBaremetalHostStatus") +} + +func (self *SBaseHostDriver) RequestSyncBaremetalHostConfig(ctx context.Context, userCred mcclient.TokenCredential, baremetal *models.SHost, task taskman.ITask) error { + return errors.Wrapf(cloudprovider.ErrNotSupported, "RequestSyncBaremetalHostConfig") +} + +func (self *SBaseHostDriver) ValidateUpdateDisk(ctx context.Context, userCred mcclient.TokenCredential, input *api.DiskUpdateInput) (*api.DiskUpdateInput, error) { return input, nil } diff --git a/pkg/compute/hostdrivers/bingocloud.go b/pkg/compute/hostdrivers/bingocloud.go index 0932562209b..8fe471f41c2 100644 --- a/pkg/compute/hostdrivers/bingocloud.go +++ b/pkg/compute/hostdrivers/bingocloud.go @@ -36,6 +36,10 @@ func (self *SBingoCloudHostDriver) GetHypervisor() string { return api.HYPERVISOR_BINGO_CLOUD } +func (self *SBingoCloudHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_BINGO_CLOUD +} + func (self *SBingoCloudHostDriver) ValidateDiskSize(storage *models.SStorage, sizeGb int) error { return nil } diff --git a/pkg/compute/hostdrivers/cloudpods-baremetal.go b/pkg/compute/hostdrivers/cloudpods-baremetal.go new file mode 100644 index 00000000000..5732651f3bb --- /dev/null +++ b/pkg/compute/hostdrivers/cloudpods-baremetal.go @@ -0,0 +1,137 @@ +// Copyright 2019 Yunion +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package hostdrivers + +import ( + "context" + "time" + + "yunion.io/x/cloudmux/pkg/cloudprovider" + "yunion.io/x/jsonutils" + "yunion.io/x/log" + "yunion.io/x/pkg/errors" + + api "yunion.io/x/onecloud/pkg/apis/compute" + "yunion.io/x/onecloud/pkg/cloudcommon/db/taskman" + "yunion.io/x/onecloud/pkg/compute/models" + "yunion.io/x/onecloud/pkg/mcclient" + "yunion.io/x/onecloud/pkg/mcclient/cloudpods" +) + +type SCloudpodsBaremetalHostDriver struct { + SManagedVirtualizationHostDriver +} + +func init() { + driver := SCloudpodsBaremetalHostDriver{} + models.RegisterHostDriver(&driver) +} + +func (self *SCloudpodsBaremetalHostDriver) GetHostType() string { + return api.HOST_TYPE_BAREMETAL +} + +func (self *SCloudpodsBaremetalHostDriver) GetHypervisor() string { + return api.HYPERVISOR_BAREMETAL +} + +func (self *SCloudpodsBaremetalHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_CLOUDPODS +} + +func (self *SCloudpodsBaremetalHostDriver) ValidateDiskSize(storage *models.SStorage, sizeGb int) error { + return nil +} + +func (driver *SCloudpodsBaremetalHostDriver) GetStoragecacheQuota(host *models.SHost) int { + return 100 +} + +func (driver *SCloudpodsBaremetalHostDriver) RequestBaremetalUnmaintence(ctx context.Context, userCred mcclient.TokenCredential, baremetal *models.SHost, task taskman.ITask) error { + taskman.LocalTaskRun(task, func() (jsonutils.JSONObject, error) { + iHost, err := baremetal.GetIHost(ctx) + if err != nil { + return nil, errors.Wrapf(err, "GetIHost") + } + h := iHost.(*cloudpods.SHost) + err = h.Stop() + if err != nil { + return nil, err + } + err = cloudprovider.Wait(time.Second*10, time.Minute*10, func() (bool, error) { + err = iHost.Refresh() + if err != nil { + return false, errors.Wrapf(err, "Refresh") + } + status := iHost.GetStatus() + log.Debugf("expect baremetal host status %s current is: %s", api.HOST_STATUS_READY, 
status) + if status != api.HOST_STATUS_READY { + return false, nil + } + return true, nil + }) + if err != nil { + return nil, errors.Wrapf(err, "Wait status") + } + return nil, nil + }) + return nil +} + +func (driver *SCloudpodsBaremetalHostDriver) RequestBaremetalMaintence(ctx context.Context, userCred mcclient.TokenCredential, baremetal *models.SHost, task taskman.ITask) error { + taskman.LocalTaskRun(task, func() (jsonutils.JSONObject, error) { + iHost, err := baremetal.GetIHost(ctx) + if err != nil { + return nil, errors.Wrapf(err, "GetIHost") + } + h := iHost.(*cloudpods.SHost) + err = h.Start() + if err != nil { + return nil, err + } + err = cloudprovider.Wait(time.Second*10, time.Minute*10, func() (bool, error) { + err = iHost.Refresh() + if err != nil { + return false, errors.Wrapf(err, "Refresh") + } + status := iHost.GetStatus() + log.Debugf("expect baremetal host status %s current is: %s", api.HOST_STATUS_RUNNING, status) + if status != api.HOST_STATUS_RUNNING { + return false, nil + } + return true, nil + }) + if err != nil { + return nil, errors.Wrapf(err, "Wait status") + } + return nil, nil + }) + return nil +} + +func (driver *SCloudpodsBaremetalHostDriver) RequestSyncBaremetalHostStatus(ctx context.Context, userCred mcclient.TokenCredential, baremetal *models.SHost, task taskman.ITask) error { + taskman.LocalTaskRun(task, func() (jsonutils.JSONObject, error) { + iHost, err := baremetal.GetIHost(ctx) + if err != nil { + return nil, errors.Wrapf(err, "GetIHost") + } + return nil, baremetal.SyncWithCloudHost(ctx, userCred, iHost) + }) + return nil +} + +func (driver *SCloudpodsBaremetalHostDriver) RequestSyncBaremetalHostConfig(ctx context.Context, userCred mcclient.TokenCredential, baremetal *models.SHost, task taskman.ITask) error { + return driver.RequestSyncBaremetalHostStatus(ctx, userCred, baremetal, task) +} diff --git a/pkg/compute/hostdrivers/cloudpods-esxi.go b/pkg/compute/hostdrivers/cloudpods-esxi.go new file mode 100644 index 
00000000000..866dc54f846 --- /dev/null +++ b/pkg/compute/hostdrivers/cloudpods-esxi.go @@ -0,0 +1,49 @@ +// Copyright 2019 Yunion +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package hostdrivers + +import ( + api "yunion.io/x/onecloud/pkg/apis/compute" + "yunion.io/x/onecloud/pkg/compute/models" +) + +type SCloudpodsESXiHostDriver struct { + SManagedVirtualizationHostDriver +} + +func init() { + driver := SCloudpodsESXiHostDriver{} + models.RegisterHostDriver(&driver) +} + +func (self *SCloudpodsESXiHostDriver) GetHostType() string { + return api.HOST_TYPE_ESXI +} + +func (self *SCloudpodsESXiHostDriver) GetHypervisor() string { + return api.HYPERVISOR_ESXI +} + +func (self *SCloudpodsESXiHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_CLOUDPODS +} + +func (self *SCloudpodsESXiHostDriver) ValidateDiskSize(storage *models.SStorage, sizeGb int) error { + return nil +} + +func (driver *SCloudpodsESXiHostDriver) GetStoragecacheQuota(host *models.SHost) int { + return 100 +} diff --git a/pkg/compute/hostdrivers/cloudpods.go b/pkg/compute/hostdrivers/cloudpods-kvm.go similarity index 88% rename from pkg/compute/hostdrivers/cloudpods.go rename to pkg/compute/hostdrivers/cloudpods-kvm.go index 358304bf3cb..b90d2b58277 100644 --- a/pkg/compute/hostdrivers/cloudpods.go +++ b/pkg/compute/hostdrivers/cloudpods-kvm.go @@ -29,11 +29,15 @@ func init() { } func (self *SCloudpodsHostDriver) GetHostType() string { - return api.HOST_TYPE_CLOUDPODS + 
return api.HOST_TYPE_HYPERVISOR } func (self *SCloudpodsHostDriver) GetHypervisor() string { - return api.HYPERVISOR_CLOUDPODS + return api.HYPERVISOR_DEFAULT +} + +func (self *SCloudpodsHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_CLOUDPODS } func (self *SCloudpodsHostDriver) ValidateDiskSize(storage *models.SStorage, sizeGb int) error { diff --git a/pkg/compute/hostdrivers/container.go b/pkg/compute/hostdrivers/container.go index 38d2cc43255..1bb0d9bccb3 100644 --- a/pkg/compute/hostdrivers/container.go +++ b/pkg/compute/hostdrivers/container.go @@ -37,3 +37,7 @@ func (d *SContainerHostDriver) GetHostType() string { func (d *SContainerHostDriver) GetHypervisor() string { return api.HYPERVISOR_POD } + +func (d *SContainerHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_ONECLOUD +} diff --git a/pkg/compute/hostdrivers/ctyun.go b/pkg/compute/hostdrivers/ctyun.go index 1dd5fd6f211..f933875a1b1 100644 --- a/pkg/compute/hostdrivers/ctyun.go +++ b/pkg/compute/hostdrivers/ctyun.go @@ -41,6 +41,10 @@ func (self *SCtyunHostDriver) GetHypervisor() string { return api.HYPERVISOR_CTYUN } +func (self *SCtyunHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_CTYUN +} + // 系统盘必须至少40G func (self *SCtyunHostDriver) ValidateDiskSize(storage *models.SStorage, sizeGb int) error { switch storage.StorageType { diff --git a/pkg/compute/hostdrivers/cucloud.go b/pkg/compute/hostdrivers/cucloud.go index 0993602e601..268958530b2 100644 --- a/pkg/compute/hostdrivers/cucloud.go +++ b/pkg/compute/hostdrivers/cucloud.go @@ -35,3 +35,7 @@ func (self *SCucloudHostDriver) GetHostType() string { func (self *SCucloudHostDriver) GetHypervisor() string { return api.HYPERVISOR_CUCLOUD } + +func (self *SCucloudHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_CUCLOUD +} diff --git a/pkg/compute/hostdrivers/ecloud.go b/pkg/compute/hostdrivers/ecloud.go index 7bebe7e2786..bb140f3ffc5 100644 --- a/pkg/compute/hostdrivers/ecloud.go +++ 
b/pkg/compute/hostdrivers/ecloud.go @@ -35,3 +35,7 @@ func (d *SEcloudHostDriver) GetHostType() string { func (d *SEcloudHostDriver) GetHypervisor() string { return api.HYPERVISOR_ECLOUD } + +func (d *SEcloudHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_ECLOUD +} diff --git a/pkg/compute/hostdrivers/esxi.go b/pkg/compute/hostdrivers/esxi.go index 46941ef2096..dc1c031c80b 100644 --- a/pkg/compute/hostdrivers/esxi.go +++ b/pkg/compute/hostdrivers/esxi.go @@ -48,6 +48,10 @@ func (self *SESXiHostDriver) GetHypervisor() string { return api.HYPERVISOR_ESXI } +func (self *SESXiHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_ONECLOUD +} + func (self *SESXiHostDriver) ValidateDiskSize(storage *models.SStorage, sizeGb int) error { return nil } diff --git a/pkg/compute/hostdrivers/google.go b/pkg/compute/hostdrivers/google.go index 02634277c50..d0908698fa0 100644 --- a/pkg/compute/hostdrivers/google.go +++ b/pkg/compute/hostdrivers/google.go @@ -43,6 +43,10 @@ func (self *SGoogleHostDriver) GetHypervisor() string { return api.HYPERVISOR_GOOGLE } +func (self *SGoogleHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_GOOGLE +} + func (self *SGoogleHostDriver) ValidateDiskSize(storage *models.SStorage, sizeGb int) error { minGB := 10 maxGB := -1 diff --git a/pkg/compute/hostdrivers/h3c.go b/pkg/compute/hostdrivers/h3c.go index 4d47009bfdc..e1d598b9d46 100644 --- a/pkg/compute/hostdrivers/h3c.go +++ b/pkg/compute/hostdrivers/h3c.go @@ -41,6 +41,10 @@ func (self *SH3CHostDriver) GetHypervisor() string { return api.HYPERVISOR_H3C } +func (self *SH3CHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_H3C +} + // 系统盘必须至少40G func (self *SH3CHostDriver) ValidateDiskSize(storage *models.SStorage, sizeGb int) error { if sizeGb < 1 || sizeGb > 65536 { diff --git a/pkg/compute/hostdrivers/hcs.go b/pkg/compute/hostdrivers/hcs.go index 9772afd021e..9b426b31eb8 100644 --- a/pkg/compute/hostdrivers/hcs.go +++ 
b/pkg/compute/hostdrivers/hcs.go @@ -41,6 +41,10 @@ func (self *SHCSHostDriver) GetHypervisor() string { return api.HYPERVISOR_HCS } +func (self *SHCSHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_HCS +} + // 系统盘必须至少40G func (self *SHCSHostDriver) ValidateDiskSize(storage *models.SStorage, sizeGb int) error { if sizeGb < 1 || sizeGb > 65536 { diff --git a/pkg/compute/hostdrivers/hcsop.go b/pkg/compute/hostdrivers/hcsop.go index c7132a123e2..5ca4ce2d25d 100644 --- a/pkg/compute/hostdrivers/hcsop.go +++ b/pkg/compute/hostdrivers/hcsop.go @@ -41,6 +41,10 @@ func (self *SHCSOPHostDriver) GetHypervisor() string { return api.HYPERVISOR_HCSOP } +func (self *SHCSOPHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_HCSOP +} + // 系统盘必须至少40G func (self *SHCSOPHostDriver) ValidateDiskSize(storage *models.SStorage, sizeGb int) error { if sizeGb < 1 || sizeGb > 65536 { diff --git a/pkg/compute/hostdrivers/huawei.go b/pkg/compute/hostdrivers/huawei.go index 372508678ee..cd6490a9c63 100644 --- a/pkg/compute/hostdrivers/huawei.go +++ b/pkg/compute/hostdrivers/huawei.go @@ -41,6 +41,10 @@ func (self *SHuaweiHostDriver) GetHypervisor() string { return api.HYPERVISOR_HUAWEI } +func (self *SHuaweiHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_HUAWEI +} + // 系统盘必须至少40G func (self *SHuaweiHostDriver) ValidateDiskSize(storage *models.SStorage, sizeGb int) error { switch storage.StorageType { diff --git a/pkg/compute/hostdrivers/huawei_stack.go b/pkg/compute/hostdrivers/huawei_stack.go index 4575bd18aaf..58f2d5e2634 100644 --- a/pkg/compute/hostdrivers/huawei_stack.go +++ b/pkg/compute/hostdrivers/huawei_stack.go @@ -41,6 +41,10 @@ func (self *SHCSOHostDriver) GetHypervisor() string { return api.HYPERVISOR_HCSO } +func (self *SHCSOHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_HCSO +} + // 系统盘必须至少40G func (self *SHCSOHostDriver) ValidateDiskSize(storage *models.SStorage, sizeGb int) error { switch storage.StorageType { diff --git 
a/pkg/compute/hostdrivers/incloudsphere.go b/pkg/compute/hostdrivers/incloudsphere.go index fd069577a25..8f358162a23 100644 --- a/pkg/compute/hostdrivers/incloudsphere.go +++ b/pkg/compute/hostdrivers/incloudsphere.go @@ -39,6 +39,10 @@ func (self *SInCloudSphereHostDriver) GetHypervisor() string { return api.HYPERVISOR_INCLOUD_SPHERE } +func (self *SInCloudSphereHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_INCLOUD_SPHERE +} + func (self *SInCloudSphereHostDriver) ValidateDiskSize(storage *models.SStorage, sizeGb int) error { return nil } diff --git a/pkg/compute/hostdrivers/jdcloud.go b/pkg/compute/hostdrivers/jdcloud.go index f4a74f8eba2..4f735556961 100644 --- a/pkg/compute/hostdrivers/jdcloud.go +++ b/pkg/compute/hostdrivers/jdcloud.go @@ -41,6 +41,10 @@ func (self *SJDcloudHostDriver) GetHypervisor() string { return api.HYPERVISOR_JDCLOUD } +func (self *SJDcloudHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_JDCLOUD +} + // ValidateResetDisk 仅可用状态的云硬盘支持恢复 // 卸载硬盘需要停止云主机 func (self *SJDcloudHostDriver) ValidateResetDisk(ctx context.Context, userCred mcclient.TokenCredential, disk *models.SDisk, snapshot *models.SSnapshot, guests []models.SGuest, input *api.DiskResetInput) (*api.DiskResetInput, error) { diff --git a/pkg/compute/hostdrivers/ksyun.go b/pkg/compute/hostdrivers/ksyun.go index 49594c4dffb..6a7ff3d34d4 100644 --- a/pkg/compute/hostdrivers/ksyun.go +++ b/pkg/compute/hostdrivers/ksyun.go @@ -35,3 +35,7 @@ func (self *SKsyunHostDriver) GetHostType() string { func (self *SKsyunHostDriver) GetHypervisor() string { return api.HYPERVISOR_KSYUN } + +func (self *SKsyunHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_KSYUN +} diff --git a/pkg/compute/hostdrivers/kvm.go b/pkg/compute/hostdrivers/kvm.go index bf2bd237b51..d4c33882474 100644 --- a/pkg/compute/hostdrivers/kvm.go +++ b/pkg/compute/hostdrivers/kvm.go @@ -59,6 +59,10 @@ func (self *SKVMHostDriver) GetHypervisor() string { return api.HYPERVISOR_KVM } +func 
(self *SKVMHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_ONECLOUD +} + func (self *SKVMHostDriver) validateGPFS(ctx context.Context, userCred mcclient.TokenCredential, host *models.SHost, input api.HostStorageCreateInput) (api.HostStorageCreateInput, error) { header := http.Header{} header.Set(mcclient.AUTH_TOKEN, userCred.GetTokenString()) diff --git a/pkg/compute/hostdrivers/managedvirtual.go b/pkg/compute/hostdrivers/managedvirtual.go index 1e24bba2725..2f3c76cbd53 100644 --- a/pkg/compute/hostdrivers/managedvirtual.go +++ b/pkg/compute/hostdrivers/managedvirtual.go @@ -368,7 +368,11 @@ func (self *SManagedVirtualizationHostDriver) RequestRebuildDiskOnStorage(ctx co } func (driver *SManagedVirtualizationHostDriver) IsReachStoragecacheCapacityLimit(host *models.SHost, cachedImages []models.SCachedimage) bool { - quota := host.GetHostDriver().GetStoragecacheQuota(host) + hostDriver, err := host.GetHostDriver() + if err != nil { + return false + } + quota := hostDriver.GetStoragecacheQuota(host) log.Debugf("Cached image total: %d quota: %d", len(cachedImages), quota) if quota > 0 && len(cachedImages) >= quota { return true diff --git a/pkg/compute/hostdrivers/nutanix.go b/pkg/compute/hostdrivers/nutanix.go index 95286280a10..d4b27022cc8 100644 --- a/pkg/compute/hostdrivers/nutanix.go +++ b/pkg/compute/hostdrivers/nutanix.go @@ -36,6 +36,10 @@ func (self *SNutanixHostDriver) GetHypervisor() string { return api.HYPERVISOR_NUTANIX } +func (self *SNutanixHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_NUTANIX +} + func (self *SNutanixHostDriver) ValidateDiskSize(storage *models.SStorage, sizeGb int) error { return nil } diff --git a/pkg/compute/hostdrivers/openstack.go b/pkg/compute/hostdrivers/openstack.go index 81070c0c956..955f45cd949 100644 --- a/pkg/compute/hostdrivers/openstack.go +++ b/pkg/compute/hostdrivers/openstack.go @@ -40,6 +40,10 @@ func (self *SOpenStackHostDriver) GetHypervisor() string { return api.HYPERVISOR_OPENSTACK 
} +func (self *SOpenStackHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_OPENSTACK +} + func (self *SOpenStackHostDriver) ValidateDiskSize(storage *models.SStorage, sizeGb int) error { return nil } diff --git a/pkg/compute/hostdrivers/oracle.go b/pkg/compute/hostdrivers/oracle.go index 3a31a94615f..05bbcdcdff7 100644 --- a/pkg/compute/hostdrivers/oracle.go +++ b/pkg/compute/hostdrivers/oracle.go @@ -35,3 +35,7 @@ func (self *SOracleHostDriver) GetHostType() string { func (self *SOracleHostDriver) GetHypervisor() string { return api.HYPERVISOR_ORACLE } + +func (self *SOracleHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_ORACLE +} diff --git a/pkg/compute/hostdrivers/proxmox.go b/pkg/compute/hostdrivers/proxmox.go index a482571bc36..795a449260f 100644 --- a/pkg/compute/hostdrivers/proxmox.go +++ b/pkg/compute/hostdrivers/proxmox.go @@ -53,6 +53,10 @@ func (self *SProxmoxHostDriver) GetHypervisor() string { return api.HYPERVISOR_PROXMOX } +func (self *SProxmoxHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_PROXMOX +} + func (self *SProxmoxHostDriver) ValidateDiskSize(storage *models.SStorage, sizeGb int) error { return nil } diff --git a/pkg/compute/hostdrivers/qcloud.go b/pkg/compute/hostdrivers/qcloud.go index 6b282b06016..a885d01a507 100644 --- a/pkg/compute/hostdrivers/qcloud.go +++ b/pkg/compute/hostdrivers/qcloud.go @@ -44,6 +44,10 @@ func (self *SQcloudHostDriver) GetHypervisor() string { return api.HYPERVISOR_QCLOUD } +func (self *SQcloudHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_QCLOUD +} + func (self *SQcloudHostDriver) ValidateDiskSize(storage *models.SStorage, sizeGb int) error { if sizeGb%10 != 0 { return fmt.Errorf("The disk size must be a multiple of 10Gb") diff --git a/pkg/compute/hostdrivers/qingcloud.go b/pkg/compute/hostdrivers/qingcloud.go index 87dd7595120..e04ffd1d82c 100644 --- a/pkg/compute/hostdrivers/qingcloud.go +++ b/pkg/compute/hostdrivers/qingcloud.go @@ -35,3 +35,7 @@ func 
(self *SQingCloudHostDriver) GetHostType() string { func (self *SQingCloudHostDriver) GetHypervisor() string { return api.HYPERVISOR_QINGCLOUD } + +func (self *SQingCloudHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_QINGCLOUD +} diff --git a/pkg/compute/hostdrivers/remotefile.go b/pkg/compute/hostdrivers/remotefile.go index 6f83207c167..78ba863af88 100644 --- a/pkg/compute/hostdrivers/remotefile.go +++ b/pkg/compute/hostdrivers/remotefile.go @@ -36,6 +36,10 @@ func (self *SRemoteFileDriver) GetHypervisor() string { return api.HYPERVISOR_REMOTEFILE } +func (self *SRemoteFileDriver) GetProvider() string { + return api.CLOUD_PROVIDER_REMOTEFILE +} + func (self *SRemoteFileDriver) ValidateDiskSize(storage *models.SStorage, sizeGb int) error { return nil } diff --git a/pkg/compute/hostdrivers/ucloud.go b/pkg/compute/hostdrivers/ucloud.go index 80257a8553d..5e57d87438e 100644 --- a/pkg/compute/hostdrivers/ucloud.go +++ b/pkg/compute/hostdrivers/ucloud.go @@ -41,6 +41,10 @@ func (self *SUCloudHostDriver) GetHypervisor() string { return api.HYPERVISOR_UCLOUD } +func (self *SUCloudHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_UCLOUD +} + func (self *SUCloudHostDriver) ValidateDiskSize(storage *models.SStorage, sizeGb int) error { if storage.StorageType == api.STORAGE_UCLOUD_CLOUD_NORMAL { if sizeGb < 20 || sizeGb > 8000 { diff --git a/pkg/compute/hostdrivers/volcengine.go b/pkg/compute/hostdrivers/volcengine.go index ea020a7e538..4da3ac5b736 100644 --- a/pkg/compute/hostdrivers/volcengine.go +++ b/pkg/compute/hostdrivers/volcengine.go @@ -44,6 +44,10 @@ func (self *SVolcengineHostDriver) GetHypervisor() string { return api.HYPERVISOR_VOLCENGINE } +func (self *SVolcengineHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_VOLCENGINE +} + func (self *SVolcengineHostDriver) ValidateDiskSize(storage *models.SStorage, sizeGb int) error { if sizeGb%10 != 0 { return fmt.Errorf("The disk size must be a multiple of 10Gb") diff --git 
a/pkg/compute/hostdrivers/zstack.go b/pkg/compute/hostdrivers/zstack.go index f5e2a9231ae..b85602a8c4a 100644 --- a/pkg/compute/hostdrivers/zstack.go +++ b/pkg/compute/hostdrivers/zstack.go @@ -40,6 +40,10 @@ func (self *SZStackHostDriver) GetHypervisor() string { return api.HYPERVISOR_ZSTACK } +func (self *SZStackHostDriver) GetProvider() string { + return api.CLOUD_PROVIDER_ZSTACK +} + func (self *SZStackHostDriver) ValidateDiskSize(storage *models.SStorage, sizeGb int) error { return nil } diff --git a/pkg/compute/models/capabilities.go b/pkg/compute/models/capabilities.go index 75573be86e1..454cb29ba74 100644 --- a/pkg/compute/models/capabilities.go +++ b/pkg/compute/models/capabilities.go @@ -23,6 +23,7 @@ import ( "yunion.io/x/cloudmux/pkg/cloudprovider" "yunion.io/x/jsonutils" "yunion.io/x/log" + "yunion.io/x/pkg/errors" "yunion.io/x/pkg/tristate" "yunion.io/x/pkg/util/rbacscope" "yunion.io/x/pkg/utils" @@ -38,7 +39,8 @@ import ( ) type SCapabilities struct { - Hypervisors []string `json:",allowempty"` + Hypervisors []string `json:",allowempty"` + HypervisorInfo map[string][]string `json:",allowempty"` Brands []string `json:",allowempty"` EnabledBrands []string `json:",allowempty"` @@ -126,8 +128,6 @@ type SCapabilities struct { ReadOnlyDisabledSnapshotPolicyBrands []string `json:",allowempty"` ResourceTypes []string `json:",allowempty"` - StorageTypes []string `json:",allowempty"` // going to remove on 2.14 - DataStorageTypes []string `json:",allowempty"` // going to remove on 2.14 GPUModels []string `json:",allowempty"` // Deprecated by PCIModelTypes PCIModelTypes []PCIDevModelTypes `json:",allowempty"` HostCpuArchs []string `json:",allowempty"` // x86_64 aarch64 @@ -146,20 +146,24 @@ type SCapabilities struct { Specs jsonutils.JSONObject AvailableHostCount int + *StorageInfos + InstanceCapabilities []cloudprovider.SInstanceCapability +} + +type StorageInfos struct { StorageTypes2 map[string][]string `json:",allowempty"` StorageTypes3 
map[string]map[string]*SimpleStorageInfo `json:",allowempty"` DataStorageTypes2 map[string][]string `json:",allowempty"` DataStorageTypes3 map[string]map[string]*SimpleStorageInfo `json:",allowempty"` - - InstanceCapabilities []cloudprovider.SInstanceCapability } func GetDiskCapabilities(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, region *SCloudregion, zone *SZone) (SCapabilities, error) { capa := SCapabilities{} - s1, d1, s2, s3, d2, d3 := getStorageTypes(ctx, userCred, region, zone, "") - capa.StorageTypes, capa.DataStorageTypes = s1, d1 - capa.StorageTypes2, capa.StorageTypes3 = s2, s3 - capa.DataStorageTypes2, capa.DataStorageTypes3 = d2, d3 + var err error + capa.StorageInfos, err = getStorageTypes(ctx, userCred, region, zone, "") + if err != nil { + return capa, errors.Wrapf(err, "getStorageTypes") + } capa.MinDataDiskCount = getMinDataDiskCount(region, zone) capa.MaxDataDiskCount = getMaxDataDiskCount(region, zone) return capa, nil @@ -194,21 +198,34 @@ func GetCapabilities(ctx context.Context, userCred mcclient.TokenCredential, que } domainId = "" } - capa.Hypervisors = getHypervisors(ctx, userCred, region, zone, domainId) + var err error + capa.HypervisorInfo, err = getHypervisors(ctx, userCred, region, zone, domainId) + if err != nil { + return capa, errors.Wrapf(err, "getHypervisors") + } + capa.Hypervisors = []string{} + for _, hypervisors := range capa.HypervisorInfo { + for _, h := range hypervisors { + if !utils.IsInStringArray(h, capa.Hypervisors) { + capa.Hypervisors = append(capa.Hypervisors, h) + } + } + } capa.InstanceCapabilities = []cloudprovider.SInstanceCapability{} - for _, hypervisor := range capa.Hypervisors { - driver := GetDriver(hypervisor) - if driver != nil { - capa.InstanceCapabilities = append(capa.InstanceCapabilities, driver.GetInstanceCapability()) + for provider, hypervisors := range capa.HypervisorInfo { + for _, hypervisor := range hypervisors { + driver, _ := GetDriver(hypervisor, 
provider) + if driver != nil { + capa.InstanceCapabilities = append(capa.InstanceCapabilities, driver.GetInstanceCapability()) + } } } getBrands(region, zone, domainId, &capa) - // capa.Brands, capa.ComputeEngineBrands, capa.NetworkManageBrands, capa.ObjectStorageBrands = a, c, n, o capa.ResourceTypes = getResourceTypes(ctx, userCred, region, zone, domainId) - s1, d1, s2, s3, d2, d3 := getStorageTypes(ctx, userCred, region, zone, domainId) - capa.StorageTypes, capa.DataStorageTypes = s1, d1 - capa.StorageTypes2, capa.StorageTypes3 = s2, s3 - capa.DataStorageTypes2, capa.DataStorageTypes3 = d2, d3 + capa.StorageInfos, err = getStorageTypes(ctx, userCred, region, zone, domainId) + if err != nil { + return capa, errors.Wrapf(err, "getStorageTypes") + } capa.GPUModels, capa.PCIModelTypes = getIsolatedDeviceInfo(ctx, userCred, region, zone, domainId) capa.SchedPolicySupport = isSchedPolicySupported(region, zone) capa.MinNicCount = getMinNicCount(region, zone) @@ -230,7 +247,6 @@ func GetCapabilities(ctx context.Context, userCred mcclient.TokenCredential, que if len(domainId) > 0 { query.(*jsonutils.JSONDict).Add(jsonutils.NewString(domainId), "domain_id") } - var err error serverType := jsonutils.GetAnyString(query, []string{"host_type", "server_type"}) autoAllocNetworkCount, _ := getAutoAllocNetworkCount(ctx, userCred, ownerId, scope, region, zone, serverType) capa.PublicNetworkCount = autoAllocNetworkCount @@ -483,46 +499,58 @@ func getBrands(region *SCloudregion, zone *SZone, domainId string, capa *SCapabi return } -func getHypervisors(ctx context.Context, userCred mcclient.TokenCredential, region *SCloudregion, zone *SZone, domainId string) []string { - q := HostManager.Query("host_type", "manager_id") - if region != nil { - subq := getRegionZoneSubq(region) - q = q.Filter(sqlchemy.In(q.Field("zone_id"), subq)) - } +func getHypervisors(ctx context.Context, userCred mcclient.TokenCredential, region *SCloudregion, zone *SZone, domainId string) (map[string][]string, 
error) { + q := HostManager.Query().IsNotEmpty("host_type").IsTrue("enabled") if zone != nil { q = q.Equals("zone_id", zone.Id) } + zoneQ := ZoneManager.Query() + if region != nil { + zoneQ = zoneQ.Equals("cloudregion_id", region.Id) + } if len(domainId) > 0 { ownerId := &db.SOwnerId{DomainId: domainId} q = HostManager.FilterByOwner(ctx, q, HostManager, userCred, ownerId, rbacscope.ScopeDomain) - /*subq := getDomainManagerSubq(domainId) - q = q.Filter(sqlchemy.OR( - sqlchemy.In(q.Field("manager_id"), subq), - sqlchemy.IsNullOrEmpty(q.Field("manager_id")), - ))*/ } - q = q.IsNotEmpty("host_type").IsNotNull("host_type") - // q = q.Equals("host_status", HOST_ONLINE) - q = q.IsTrue("enabled") - q = q.Distinct() - rows, err := q.Rows() + + zones := zoneQ.SubQuery() + regions := CloudregionManager.Query().SubQuery() + + sq := q.SubQuery() + hQ := sq.Query( + sq.Field("host_type"), + regions.Field("provider"), + ) + + hQ = hQ.Join(zones, sqlchemy.Equals(hQ.Field("zone_id"), zones.Field("id"))) + hQ = hQ.Join(regions, sqlchemy.Equals(zones.Field("cloudregion_id"), regions.Field("id"))) + + result := []struct { + HostType string + Provider string + }{} + + hQ = hQ.Distinct() + + err := hQ.All(&result) if err != nil { - return nil + return nil, err } - defer rows.Close() - hypervisors := make([]string, 0) - for rows.Next() { - var hostType string - var managerId string - rows.Scan(&hostType, &managerId) - if len(hostType) > 0 && IsProviderAccountEnabled(managerId) { - hypervisor := api.HOSTTYPE_HYPERVISOR[hostType] - if !utils.IsInStringArray(hypervisor, hypervisors) { - hypervisors = append(hypervisors, hypervisor) - } + ret := map[string][]string{} + for _, h := range result { + drv, err := GetHostDriver(h.HostType, h.Provider) + if err != nil { + return nil, errors.Wrapf(err, "GetHostDriver") + } + _, ok := ret[h.Provider] + if !ok { + ret[h.Provider] = []string{} + } + if !utils.IsInStringArray(drv.GetHypervisor(), ret[h.Provider]) { + ret[h.Provider] = 
append(ret[h.Provider], drv.GetHypervisor()) } } - return hypervisors + return ret, nil } func getResourceTypes(ctx context.Context, userCred mcclient.TokenCredential, region *SCloudregion, zone *SZone, domainId string) []string { @@ -579,6 +607,7 @@ type StorageInfo struct { FreeCapacity int64 IsSysDiskStore bool HostType string + Provider string } type sStorage struct { @@ -602,12 +631,8 @@ func getStorageTypes( ctx context.Context, userCred mcclient.TokenCredential, region *SCloudregion, zone *SZone, domainId string, -) ( - []string, []string, - map[string][]string, map[string]map[string]*SimpleStorageInfo, - map[string][]string, map[string]map[string]*SimpleStorageInfo, -) { - storages := StorageManager.Query().SubQuery() +) (*StorageInfos, error) { + storageQ := StorageManager.Query() disks1 := DiskManager.Query().SubQuery() usedDisk := disks1.Query( disks1.Field("storage_id"), @@ -627,6 +652,18 @@ func getStorageTypes( } hosts := hostQuery.SubQuery() + zoneQ := ZoneManager.Query() + if zone != nil { + zoneQ = zoneQ.Equals("id", zone.Id) + } + if region != nil { + zoneQ = zoneQ.Equals("cloudregion_id", region.Id) + } + zones := zoneQ.SubQuery() + regions := CloudregionManager.Query().SubQuery() + + storages := storageQ.SubQuery() + q := storages.Query( storages.Field("id"), storages.Field("name"), @@ -639,7 +676,9 @@ func getStorageTypes( failedDisk.Field("waste_capacity"), storages.Field("is_sys_disk_store"), hosts.Field("host_type"), + regions.Field("provider"), ) + q = q.LeftJoin(usedDisk, sqlchemy.Equals(usedDisk.Field("storage_id"), storages.Field("id"))) q = q.LeftJoin(failedDisk, sqlchemy.Equals(failedDisk.Field("storage_id"), storages.Field("id"))) @@ -647,17 +686,22 @@ func getStorageTypes( hostStorages.Field("storage_id"), storages.Field("id"), )) + q = q.Join(hosts, sqlchemy.Equals( hosts.Field("id"), hostStorages.Field("host_id"), )) - if region != nil { - subq := getRegionZoneSubq(region) - q = q.Filter(sqlchemy.In(storages.Field("zone_id"), 
subq)) - } - if zone != nil { - q = q.Filter(sqlchemy.Equals(storages.Field("zone_id"), zone.Id)) - } + + q = q.Join(zones, sqlchemy.Equals( + storages.Field("zone_id"), + zones.Field("id"), + )) + + q = q.Join(regions, sqlchemy.Equals( + zones.Field("cloudregion_id"), + regions.Field("id"), + )) + if len(domainId) > 0 { ownerId := &db.SOwnerId{DomainId: domainId} q = StorageManager.FilterByOwner(ctx, q, StorageManager, userCred, ownerId, rbacscope.ScopeDomain) @@ -670,34 +714,14 @@ func getStorageTypes( q = q.Filter(sqlchemy.In(storages.Field("status"), []string{api.STORAGE_ENABLED, api.STORAGE_ONLINE})) q = q.Filter(sqlchemy.IsTrue(storages.Field("enabled"))) q = q.Filter(sqlchemy.NotEquals(hosts.Field("host_type"), api.HOST_TYPE_BAREMETAL)) - rows, err := q.Rows() - if err != nil { - log.Errorf("get storage types failed %s", err) - return nil, nil, nil, nil, nil, nil - } - defer rows.Close() + ret := &StorageInfos{ + StorageTypes2: map[string][]string{}, + StorageTypes3: map[string]map[string]*SimpleStorageInfo{}, + DataStorageTypes2: map[string][]string{}, + DataStorageTypes3: map[string]map[string]*SimpleStorageInfo{}, + } var ( - sysStorageTypes = make([]string, 0) - allStorageTypes = make([]string, 0) - storageInfos = make(map[string]*SimpleStorageInfo) - sysHypervisorStorageTypes = make(map[string][]string) - allHypervisorStorageTypes = make(map[string][]string) - sysHypervisorStorageInfos = make(map[string]map[string]*SimpleStorageInfo) - allHypervisorStorageInfos = make(map[string]map[string]*SimpleStorageInfo) - - setStorageTypes = func(storageHypervisor, storageType string, hypervisorStorageTypes map[string][]string) { - sts, ok := hypervisorStorageTypes[storageHypervisor] - if !ok { - sts = make([]string, 0) - } - - if !utils.IsInStringArray(storageType, sts) { - sts = append(sts, storageType) - } - - hypervisorStorageTypes[storageHypervisor] = sts - } addStorageInfo = func(storage *StorageInfo, simpleStorage *SimpleStorageInfo) { 
simpleStorage.VirtualCapacity += storage.VirtualCapacity simpleStorage.FreeCapacity += storage.FreeCapacity @@ -707,10 +731,10 @@ func getStorageTypes( simpleStorage.UsedCapacity += storage.UsedCapacity.Int64 simpleStorage.Storages = append(simpleStorage.Storages, sStorage{Id: storage.Id, Name: storage.Name}) } - setStorageInfos = func(storageHypervisor, storageType string, storage *StorageInfo, - hypervisorStorageInfos map[string]map[string]*SimpleStorageInfo) bool { + setStorageInfos = func(hostDriver IHostDriver, storageType string, storage *StorageInfo, + storageInfos map[string]map[string]*SimpleStorageInfo) bool { var notFound bool - sfs, ok := hypervisorStorageInfos[storageHypervisor] + sfs, ok := storageInfos[hostDriver.GetHypervisor()] if !ok { sfs = make(map[string]*SimpleStorageInfo) notFound = true @@ -720,62 +744,60 @@ func getStorageTypes( notFound = true simpleStorage = &SimpleStorageInfo{Storages: []sStorage{}} } - if !utils.IsInStringArray(storageHypervisor, api.PUBLIC_CLOUD_HYPERVISORS) { + if !utils.IsInStringArray(hostDriver.GetProvider(), api.PUBLIC_CLOUD_PROVIDERS) { addStorageInfo(storage, simpleStorage) sfs[storageType] = simpleStorage - hypervisorStorageInfos[storageHypervisor] = sfs + storageInfos[hostDriver.GetHypervisor()] = sfs } return notFound } ) - for rows.Next() { - var storage StorageInfo - err := rows.Scan( - &storage.Id, &storage.Name, - &storage.Capacity, &storage.Reserved, - &storage.StorageType, &storage.MediumType, - &storage.Cmtbound, &storage.UsedCapacity, - &storage.WasteCapacity, &storage.IsSysDiskStore, - &storage.HostType, - ) + info := []StorageInfo{} + + err := q.All(&info) + if err != nil { + return nil, errors.Wrapf(err, "q.All") + } + + for i := range info { + storage := info[i] + if len(storage.Provider) == 0 { + storage.Provider = api.CLOUD_PROVIDER_ONECLOUD + } + + hostDriver, err := GetHostDriver(storage.HostType, storage.Provider) if err != nil { - log.Errorf("Scan storage rows %s", err) - 
return nil, nil, nil, nil, nil, nil + return nil, errors.Wrapf(err, "GetHostDriver") } - storageHypervisor := api.HOSTTYPE_HYPERVISOR[storage.HostType] if len(storage.StorageType) > 0 && len(storage.MediumType) > 0 { storageType := fmt.Sprintf("%s/%s", storage.StorageType, storage.MediumType) - simpleStorage, ok := storageInfos[storageType] + _, ok := ret.StorageTypes2[hostDriver.GetHypervisor()] if !ok { - simpleStorage = &SimpleStorageInfo{Storages: []sStorage{}} - if storage.IsSysDiskStore { - sysStorageTypes = append(sysStorageTypes, storageType) - } - allStorageTypes = append(allStorageTypes, storageType) + ret.StorageTypes2[hostDriver.GetHypervisor()] = []string{} + } + if !utils.IsInStringArray(storageType, ret.StorageTypes2[hostDriver.GetHypervisor()]) { + ret.StorageTypes2[hostDriver.GetHypervisor()] = append(ret.StorageTypes2[hostDriver.GetHypervisor()], storageType) } + + simpleStorage := &SimpleStorageInfo{Storages: []sStorage{}} if storage.Cmtbound.Float64 == 0 { storage.Cmtbound.Float64 = float64(options.Options.DefaultStorageOvercommitBound) } storage.VirtualCapacity = int64(float64(storage.Capacity-storage.Reserved.Int64) * storage.Cmtbound.Float64) storage.FreeCapacity = storage.VirtualCapacity - storage.UsedCapacity.Int64 - storage.WasteCapacity.Int64 addStorageInfo(&storage, simpleStorage) - storageInfos[storageType] = simpleStorage // set hypervisor storage types and infos if storage.IsSysDiskStore { - if setStorageInfos(storageHypervisor, storageType, &storage, sysHypervisorStorageInfos) { - setStorageTypes(storageHypervisor, storageType, sysHypervisorStorageTypes) - } - } - if setStorageInfos(storageHypervisor, storageType, &storage, allHypervisorStorageInfos) { - setStorageTypes(storageHypervisor, storageType, allHypervisorStorageTypes) + setStorageInfos(hostDriver, storageType, &storage, ret.StorageTypes3) } + setStorageInfos(hostDriver, storageType, &storage, ret.DataStorageTypes3) } + } - return sysStorageTypes, allStorageTypes, - 
sysHypervisorStorageTypes, sysHypervisorStorageInfos, - allHypervisorStorageTypes, allHypervisorStorageInfos + + return ret, nil } type PCIDevModelTypes struct { diff --git a/pkg/compute/models/cloudaccounts.go b/pkg/compute/models/cloudaccounts.go index 211e3a8b78a..859d8706993 100644 --- a/pkg/compute/models/cloudaccounts.go +++ b/pkg/compute/models/cloudaccounts.go @@ -2785,32 +2785,6 @@ func (manager *SCloudaccountManager) getBrandsOfProvider(provider string) ([]str return ret, nil } -func guessBrandForHypervisor(hypervisor string) string { - if hypervisor == "" { - return api.HYPERVISOR_KVM - } - driver := GetDriver(hypervisor) - if driver == nil { - log.Errorf("guestBrandFromHypervisor: fail to find driver for hypervisor %s", hypervisor) - return "" - } - provider := driver.GetProvider() - if len(provider) == 0 { - log.Errorf("guestBrandFromHypervisor: fail to find provider for hypervisor %s", hypervisor) - return "" - } - brands, err := CloudaccountManager.getBrandsOfProvider(provider) - if err != nil { - log.Errorf("guestBrandFromHypervisor: fail to find brands for hypervisor %s", hypervisor) - return "" - } - if len(brands) != 1 { - log.Errorf("guestBrandFromHypervisor: find mistached number of brands for hypervisor %s %s", hypervisor, brands) - return "" - } - return brands[0] -} - func (account *SCloudaccount) PerformSyncSkus(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, input api.CloudaccountSyncSkusInput) (jsonutils.JSONObject, error) { if !account.GetEnabled() { return nil, httperrors.NewInvalidStatusError("Account disabled") diff --git a/pkg/compute/models/containers.go b/pkg/compute/models/containers.go index b18aea4e31e..b85884f11f2 100644 --- a/pkg/compute/models/containers.go +++ b/pkg/compute/models/containers.go @@ -532,7 +532,11 @@ func (c *SContainer) StartSaveVolumeMountImage(ctx context.Context, userCred mcc } func (c *SContainer) GetPodDriver() IPodDriver { - return c.GetPod().GetDriver().(IPodDriver) 
+ driver, err := c.GetPod().GetDriver() + if err != nil { + return nil + } + return driver.(IPodDriver) } func (c *SContainer) GetDetailsExecInfo(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject) (*api.ContainerExecInfoOutput, error) { diff --git a/pkg/compute/models/disks.go b/pkg/compute/models/disks.go index 677f658705c..1d99ed66bfd 100644 --- a/pkg/compute/models/disks.go +++ b/pkg/compute/models/disks.go @@ -421,7 +421,7 @@ func (self *SDisk) CustomizeCreate(ctx context.Context, userCred mcclient.TokenC return self.SVirtualResourceBase.CustomizeCreate(ctx, userCred, ownerId, query, data) } -func (self *SDisk) ValidateUpdateData(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, input api.DiskUpdateInput) (api.DiskUpdateInput, error) { +func (self *SDisk) ValidateUpdateData(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, input *api.DiskUpdateInput) (*api.DiskUpdateInput, error) { var err error if input.DiskType != "" { @@ -435,12 +435,17 @@ func (self *SDisk) ValidateUpdateData(ctx context.Context, userCred mcclient.Tok return input, httperrors.NewNotFoundError("failed to find storage for disk %s", self.Name) } - host, _ := storage.GetMasterHost() - if host == nil { - return input, httperrors.NewNotFoundError("failed to find host for storage %s with disk %s", storage.Name, self.Name) + host, err := storage.GetMasterHost() + if err != nil { + return nil, errors.Wrapf(err, "GetMasterHost") + } + + driver, err := host.GetHostDriver() + if err != nil { + return nil, errors.Wrapf(err, "GetHostDriver") } - input, err = host.GetHostDriver().ValidateUpdateDisk(ctx, userCred, input) + input, err = driver.ValidateUpdateDisk(ctx, userCred, input) if err != nil { return input, errors.Wrap(err, "GetHostDriver().ValidateUpdateDisk") } @@ -461,36 +466,48 @@ func (man *SDiskManager) BatchCreateValidateCreateData(ctx context.Context, user return input.JSON(input), nil } -func 
diskCreateInput2ComputeQuotaKeys(input api.DiskCreateInput, ownerId mcclient.IIdentityProvider) SComputeResourceKeys { - // input.Hypervisor must be set - brand := guessBrandForHypervisor(input.Hypervisor) - keys := GetDriver(input.Hypervisor).GetComputeQuotaKeys( - rbacscope.ScopeProject, - ownerId, - brand, - ) +func diskCreateInput2ComputeQuotaKeys(input api.DiskCreateInput, ownerId mcclient.IIdentityProvider) (SComputeResourceKeys, error) { + var keys SComputeResourceKeys if len(input.PreferHost) > 0 { - hostObj, _ := HostManager.FetchById(input.PreferHost) + hostObj, err := HostManager.FetchById(input.PreferHost) + if err != nil { + return keys, err + } host := hostObj.(*SHost) - zone, _ := host.GetZone() - keys.ZoneId = zone.Id - keys.RegionId = zone.CloudregionId - } else if len(input.PreferZone) > 0 { - zoneObj, _ := ZoneManager.FetchById(input.PreferZone) - zone := zoneObj.(*SZone) - keys.ZoneId = zone.Id - keys.RegionId = zone.CloudregionId - } else if len(input.PreferWire) > 0 { - wireObj, _ := WireManager.FetchById(input.PreferWire) + input.PreferZone = host.ZoneId + keys.ZoneId = host.ZoneId + } + if len(input.PreferWire) > 0 { + wireObj, err := WireManager.FetchById(input.PreferWire) + if err != nil { + return keys, err + } wire := wireObj.(*SWire) - zone, _ := wire.GetZone() + if len(wire.ZoneId) > 0 { + input.PreferZone = wire.ZoneId + keys.ZoneId = wire.ZoneId + } + } + if len(input.PreferZone) > 0 { + zoneObj, err := ZoneManager.FetchById(input.PreferZone) + if err != nil { + return keys, err + } + zone := zoneObj.(*SZone) + input.PreferRegion = zone.CloudregionId keys.ZoneId = zone.Id keys.RegionId = zone.CloudregionId - } else if len(input.PreferRegion) > 0 { - regionObj, _ := CloudregionManager.FetchById(input.PreferRegion) - keys.RegionId = regionObj.GetId() } - return keys + if len(input.PreferRegion) > 0 { + regionObj, err := CloudregionManager.FetchById(input.PreferRegion) + if err != nil { + return keys, err + } + region := 
regionObj.(*SCloudregion) + keys.RegionId = region.GetId() + keys.Brand = region.Provider + } + return keys, nil } func (manager *SDiskManager) ValidateCreateData(ctx context.Context, userCred mcclient.TokenCredential, ownerId mcclient.IIdentityProvider, query jsonutils.JSONObject, input api.DiskCreateInput) (api.DiskCreateInput, error) { @@ -522,11 +539,15 @@ func (manager *SDiskManager) ValidateCreateData(ctx context.Context, userCred mc return input, httperrors.NewResourceNotReadyError("cloudprovider %s not available", provider.Name) } - host, _ := storage.GetMasterHost() - if host == nil { - return input, httperrors.NewResourceNotFoundError("storage %s(%s) need online and attach host for create disk", storage.Name, storage.Id) + host, err := storage.GetMasterHost() + if err != nil { + return input, errors.Wrapf(err, "GetMasterHost") + } + hostDriver, err := host.GetHostDriver() + if err != nil { + return input, errors.Wrapf(err, "GetHostDriver") } - input.Hypervisor = host.GetHostDriver().GetHypervisor() + input.Hypervisor = hostDriver.GetHypervisor() if len(diskConfig.Backend) == 0 { diskConfig.Backend = storage.StorageType } @@ -571,7 +592,7 @@ func (manager *SDiskManager) ValidateCreateData(ctx context.Context, userCred mc encInput := input.EncryptedResourceCreateInput input = *serverInput.ToDiskCreateInput() input.EncryptedResourceCreateInput = encInput - quotaKey = diskCreateInput2ComputeQuotaKeys(input, ownerId) + quotaKey, _ = diskCreateInput2ComputeQuotaKeys(input, ownerId) } input.VirtualResourceCreateInput, err = manager.SVirtualResourceBaseManager.ValidateCreateData(ctx, userCred, ownerId, query, input.VirtualResourceCreateInput) @@ -611,10 +632,15 @@ func (manager *SDiskManager) validateDiskOnStorage(diskConfig *api.DiskConfig, s var guestdriver IGuestDriver = nil if host, _ := storage.GetMasterHost(); host != nil { //公有云磁盘大小检查。 - if err := host.GetHostDriver().ValidateDiskSize(storage, diskConfig.SizeMb>>10); err != nil { + hostDriver, err := 
host.GetHostDriver() + if err != nil { + return errors.Wrapf(err, "GetHostDriver") + } + if err := hostDriver.ValidateDiskSize(storage, diskConfig.SizeMb>>10); err != nil { return httperrors.NewInputParameterError("%v", err) } - guestdriver = GetDriver(api.HOSTTYPE_HYPERVISOR[host.HostType]) + + guestdriver, _ = GetDriver(hostDriver.GetHypervisor(), hostDriver.GetProvider()) } hoststorages := HoststorageManager.Query().SubQuery() hoststorage := make([]SHoststorage, 0) @@ -705,7 +731,7 @@ func getDiskResourceRequirements(ctx context.Context, userCred mcclient.TokenCre input.Hypervisor, ) } else { - quotaKey = diskCreateInput2ComputeQuotaKeys(input, ownerId) + quotaKey, _ = diskCreateInput2ComputeQuotaKeys(input, ownerId) } req.SetKeys(quotaKey) return req @@ -852,11 +878,15 @@ func (self *SDisk) StartAllocate(ctx context.Context, host *SHost, storage *SSto input.ExistingPath = ePath } + driver, err := host.GetHostDriver() + if err != nil { + return errors.Wrapf(err, "GetHostDriver") + } + if rebuild { - return host.GetHostDriver().RequestRebuildDiskOnStorage(ctx, host, storage, self, task, input) - } else { - return host.GetHostDriver().RequestAllocateDiskOnStorage(ctx, userCred, host, storage, self, task, input) + return driver.RequestRebuildDiskOnStorage(ctx, host, storage, self, task, input) } + return driver.RequestAllocateDiskOnStorage(ctx, userCred, host, storage, self, task, input) } // make snapshot after reset out of chain @@ -920,8 +950,13 @@ func (self *SDisk) PerformDiskReset(ctx context.Context, userCred mcclient.Token return nil, httperrors.NewBadRequestError("Cannot reset disk %s(%s),Snapshot is belong to disk %s", self.Name, self.Id, snapshot.DiskId) } + driver, err := host.GetHostDriver() + if err != nil { + return nil, errors.Wrapf(err, "GetHostDriver") + } + guests := self.GetGuests() - input, err = host.GetHostDriver().ValidateResetDisk(ctx, userCred, self, snapshot, guests, input) + input, err = driver.ValidateResetDisk(ctx, userCred, self, 
snapshot, guests, input) if err != nil { return nil, err } @@ -1038,7 +1073,10 @@ func (disk *SDisk) getHypervisor() string { if storage != nil { host, _ := storage.GetMasterHost() if host != nil { - return host.GetHostDriver().GetHypervisor() + driver, _ := host.GetHostDriver() + if driver != nil { + return driver.GetHypervisor() + } } } hypervisor := disk.GetMetadata(context.Background(), "hypervisor", nil) @@ -1085,10 +1123,14 @@ func (disk *SDisk) doResize(ctx context.Context, userCred mcclient.TokenCredenti var guestdriver IGuestDriver if host, _ := storage.GetMasterHost(); host != nil { - if err := host.GetHostDriver().ValidateDiskSize(storage, sizeMb>>10); err != nil { + hostDriver, err := host.GetHostDriver() + if err != nil { + return errors.Wrapf(err, "GetHostDriver") + } + if err := hostDriver.ValidateDiskSize(storage, sizeMb>>10); err != nil { return httperrors.NewInputParameterError("%v", err) } - guestdriver = GetDriver(api.HOSTTYPE_HYPERVISOR[host.HostType]) + guestdriver, _ = GetDriver(hostDriver.GetHypervisor(), hostDriver.GetProvider()) } if guestdriver == nil || guestdriver.DoScheduleStorageFilter() { if int64(addDisk) > storage.GetFreeCapacity() && !storage.IsEmulated { @@ -1910,7 +1952,7 @@ func totalDiskSize( hosts := HostManager.Query().SubQuery() q = q.Join(hoststorages, sqlchemy.Equals(storages.Field("id"), hoststorages.Field("storage_id"))) q = q.Join(hosts, sqlchemy.Equals(hoststorages.Field("host_id"), hosts.Field("id"))) - q = q.Filter(sqlchemy.In(hosts.Field("host_type"), api.Hypervisors2HostTypes(hypervisors))) + q = q.Filter(sqlchemy.In(hosts.Field("host_type"), Hypervisors2HostTypes(hypervisors))) } if !active.IsNone() { if active.IsTrue() { diff --git a/pkg/compute/models/elasticips.go b/pkg/compute/models/elasticips.go index b5ca5ad07e6..121a7f2999e 100644 --- a/pkg/compute/models/elasticips.go +++ b/pkg/compute/models/elasticips.go @@ -158,7 +158,11 @@ func (manager *SElasticipManager) ListItemFilter( return nil, 
httperrors.NewGeneralError(err) } guest := serverObj.(*SGuest) - if guest.Hypervisor == api.HYPERVISOR_KVM || (utils.IsInStringArray(guest.Hypervisor, api.PRIVATE_CLOUD_HYPERVISORS) && + region, err := guest.GetRegion() + if err != nil { + return nil, errors.Wrapf(err, "GetRegion") + } + if guest.Hypervisor == api.HYPERVISOR_KVM || (utils.IsInStringArray(region.Provider, api.PRIVATE_CLOUD_PROVIDERS) && guest.Hypervisor != api.HYPERVISOR_HCSO && guest.Hypervisor != api.HYPERVISOR_HCS) { zone, _ := guest.getZone() networks := NetworkManager.Query().SubQuery() diff --git a/pkg/compute/models/guest_actions.go b/pkg/compute/models/guest_actions.go index e6ce912b31f..629ef5a7d7b 100644 --- a/pkg/compute/models/guest_actions.go +++ b/pkg/compute/models/guest_actions.go @@ -81,7 +81,11 @@ func (self *SGuest) GetDetailsVnc(ctx context.Context, userCred mcclient.TokenCr if options.Options.ForceUseOriginVnc { input.Origin = true } - ret, err = self.GetDriver().GetGuestVncInfo(ctx, userCred, self, host, input) + driver, err := self.GetDriver() + if err != nil { + return nil, errors.Wrapf(err, "GetDriver") + } + ret, err = driver.GetGuestVncInfo(ctx, userCred, self, host, input) if err != nil { return nil, err } @@ -156,11 +160,15 @@ func (self *SGuest) PerformEvent(ctx context.Context, userCred mcclient.TokenCre } func (self *SGuest) GetDetailsDesc(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject) (jsonutils.JSONObject, error) { - host, _ := self.GetHost() - if host == nil { - return nil, httperrors.NewInvalidStatusError("No host for server") + host, err := self.GetHost() + if err != nil { + return nil, errors.Wrapf(err, "GetHost") + } + driver, err := self.GetDriver() + if err != nil { + return nil, err } - return self.GetDriver().GetJsonDescAtHost(ctx, userCred, self, host, nil) + return driver.GetJsonDescAtHost(ctx, userCred, self, host, nil) } // 保存镜像 @@ -168,7 +176,10 @@ func (self *SGuest) PerformSaveImage(ctx context.Context, userCred 
mcclient.Toke if !utils.IsInStringArray(self.Status, []string{api.VM_READY, api.VM_RUNNING}) { return input, httperrors.NewInputParameterError("Cannot save image in status %s", self.Status) } - driver := self.GetDriver() + driver, err := self.GetDriver() + if err != nil { + return input, errors.Wrapf(err, "GetDriver") + } input.Restart = ((self.Status == api.VM_RUNNING) || input.AutoStart) && !driver.IsAllowSaveImageOnRunning() if len(input.Name) == 0 && len(input.GenerateName) == 0 { return input, httperrors.NewInputParameterError("Image name is required") @@ -189,7 +200,7 @@ func (self *SGuest) PerformSaveImage(ctx context.Context, userCred mcclient.Toke } } - factory, _ := cloudprovider.GetProviderFactory(self.GetDriver().GetProvider()) + factory, _ := cloudprovider.GetProviderFactory(driver.GetProvider()) if factory == nil || factory.IsOnPremise() { // OneCloud or VMware lockman.LockObject(ctx, disks.Root) defer lockman.ReleaseObject(ctx, disks.Root) @@ -209,7 +220,11 @@ func (self *SGuest) PerformSaveImage(ctx context.Context, userCred mcclient.Toke } func (self *SGuest) StartGuestSaveImage(ctx context.Context, userCred mcclient.TokenCredential, input api.ServerSaveImageInput, parentTaskId string) error { - return self.GetDriver().StartGuestSaveImage(ctx, userCred, self, jsonutils.Marshal(input).(*jsonutils.JSONDict), parentTaskId) + driver, err := self.GetDriver() + if err != nil { + return errors.Wrapf(err, "GetDriver") + } + return driver.StartGuestSaveImage(ctx, userCred, self, jsonutils.Marshal(input).(*jsonutils.JSONDict), parentTaskId) } // 保存主机模板 @@ -315,7 +330,11 @@ func (self *SGuest) PerformSaveGuestImage(ctx context.Context, userCred mcclient } func (self *SGuest) StartGuestSaveGuestImage(ctx context.Context, userCred mcclient.TokenCredential, data *jsonutils.JSONDict, parentTaskId string) error { - return self.GetDriver().StartGuestSaveGuestImage(ctx, userCred, self, data, parentTaskId) + driver, err := self.GetDriver() + if err != nil { + return 
err + } + return driver.StartGuestSaveGuestImage(ctx, userCred, self, data, parentTaskId) } // 同步配置 @@ -384,12 +403,17 @@ func (self *SGuest) validateMigrate( isLiveMigrate = true } + driver, err := self.GetDriver() + if err != nil { + return errors.Wrapf(err, "GetDriver") + } + if isLiveMigrate { // do live migrate check - if !self.GetDriver().IsSupportLiveMigrate() { + if !driver.IsSupportLiveMigrate() { return httperrors.NewNotAcceptableError("Not allow for hypervisor %s", self.GetHypervisor()) } - if err := self.GetDriver().CheckLiveMigrate(ctx, self, userCred, *liveMigrateInput); err != nil { + if err := driver.CheckLiveMigrate(ctx, self, userCred, *liveMigrateInput); err != nil { return err } if utils.IsInStringArray(self.Status, []string{api.VM_RUNNING, api.VM_SUSPEND}) { @@ -406,13 +430,13 @@ func (self *SGuest) validateMigrate( return httperrors.NewBadRequestError("Cannot live migrate in status %s", self.Status) } else { // do migrate check - if !self.GetDriver().IsSupportMigrate() { + if !driver.IsSupportMigrate() { return httperrors.NewNotAcceptableError("Not allow for hypervisor %s", self.GetHypervisor()) } if !migrateInput.IsRescueMode && self.Status != api.VM_READY { return httperrors.NewServerStatusError("Cannot normal migrate guest in status %s, try rescue mode or server-live-migrate?", self.Status) } - if err := self.GetDriver().CheckMigrate(ctx, self, userCred, *migrateInput); err != nil { + if err := driver.CheckMigrate(ctx, self, userCred, *migrateInput); err != nil { return err } if len(migrateInput.PreferHostId) > 0 { @@ -659,7 +683,12 @@ func (self *SGuest) PerformCancelLiveMigrate( return nil, httperrors.NewServerStatusError("cannot set migrate params in status %s", self.Status) } - return nil, self.GetDriver().RequestCancelLiveMigrate(ctx, self, userCred) + driver, err := self.GetDriver() + if err != nil { + return nil, errors.Wrapf(err, "GetDriver") + } + + return nil, driver.RequestCancelLiveMigrate(ctx, self, userCred) } // 克隆虚拟机 @@ 
-671,7 +700,12 @@ func (self *SGuest) PerformClone(ctx context.Context, userCred mcclient.TokenCre return nil, httperrors.NewBadRequestError("Can't clone guest with backup guest") } - if !self.GetDriver().IsSupportGuestClone() { + driver, err := self.GetDriver() + if err != nil { + return nil, err + } + + if !driver.IsSupportGuestClone() { return nil, httperrors.NewBadRequestError("Guest hypervisor %s does not support clone", self.Hypervisor) } @@ -832,13 +866,18 @@ func (self *SGuest) PerformDeploy( input.ResetPassword = true } + driver, err := self.GetDriver() + if err != nil { + return nil, errors.Wrapf(err, "GetDriver") + } + // 变更密码/密钥时需要Restart才能生效。更新普通字段不需要Restart, Azure需要在运行状态下操作 doRestart := false if input.ResetPassword { - doRestart = self.GetDriver().IsNeedRestartForResetLoginInfo() + doRestart = driver.IsNeedRestartForResetLoginInfo() } - deployStatus, err := self.GetDriver().GetDeployStatus() + deployStatus, err := driver.GetDeployStatus() if err != nil { return nil, httperrors.NewInputParameterError("%v", err) } @@ -888,7 +927,13 @@ func (self *SGuest) ValidateAttachDisk(ctx context.Context, disk *SDisk) error { if disk.Status != api.DISK_READY { return httperrors.NewInputParameterError("Disk in %s not able to attach", disk.Status) } - guestStatus, err := self.GetDriver().GetAttachDiskStatus() + + driver, err := self.GetDriver() + if err != nil { + return errors.Wrapf(err, "GetDriver") + } + + guestStatus, err := driver.GetAttachDiskStatus() if err != nil { return err } @@ -936,8 +981,14 @@ func (self *SGuest) PerformAttachdisk(ctx context.Context, userCred mcclient.Tok taskData.Add(jsonutils.NewInt(int64(*input.BootIndex)), "boot_index") } + driver, err := self.GetDriver() + if err != nil { + return nil, errors.Wrapf(err, "GetDriver") + } + self.SetStatus(ctx, userCred, api.VM_ATTACH_DISK, "") - return nil, self.GetDriver().StartGuestAttachDiskTask(ctx, userCred, self, taskData, "") + + return nil, driver.StartGuestAttachDiskTask(ctx, userCred, self, 
taskData, "") } func (self *SGuest) StartRestartNetworkTask(ctx context.Context, userCred mcclient.TokenCredential, parentTaskId string, ip string, inBlockStream bool) error { @@ -1013,7 +1064,13 @@ func (self *SGuest) StartSuspendTask(ctx context.Context, userCred mcclient.Toke if err != nil { return err } - return self.GetDriver().StartSuspendTask(ctx, userCred, self, nil, parentTaskId) + + driver, err := self.GetDriver() + if err != nil { + return errors.Wrapf(err, "GetDriver") + } + + return driver.StartSuspendTask(ctx, userCred, self, nil, parentTaskId) } // 恢复虚拟机挂起状态 @@ -1030,7 +1087,13 @@ func (self *SGuest) StartResumeTask(ctx context.Context, userCred mcclient.Token if err != nil { return err } - return self.GetDriver().StartResumeTask(ctx, userCred, self, nil, parentTaskId) + + driver, err := self.GetDriver() + if err != nil { + return errors.Wrapf(err, "GetDriver") + } + + return driver.StartResumeTask(ctx, userCred, self, nil, parentTaskId) } // 开机 @@ -1062,7 +1125,11 @@ func (self *SGuest) PerformStart( } if self.isAllDisksReady() { kwargs := jsonutils.Marshal(input).(*jsonutils.JSONDict) - err := self.GetDriver().PerformStart(ctx, userCred, self, kwargs) + driver, err := self.GetDriver() + if err != nil { + return nil, errors.Wrapf(err, "GetDriver") + } + err = driver.PerformStart(ctx, userCred, self, kwargs) return nil, err } else { return nil, httperrors.NewInvalidStatusError("Some disk not ready") @@ -1216,12 +1283,15 @@ func (self *SGuest) StartGuestStopTask(ctx context.Context, userCred mcclient.To if len(parentTaskId) > 0 { params.Add(jsonutils.JSONTrue, "subtask") } - driver := self.GetDriver() + driver, err := self.GetDriver() + if err != nil { + return errors.Wrapf(err, "GetDriver") + } shutdownMode := api.VM_SHUTDOWN_MODE_KEEP_CHARGING if stopCharging && driver.IsSupportShutdownMode() { shutdownMode = api.VM_SHUTDOWN_MODE_STOP_CHARGING } - _, err := db.Update(self, func() error { + _, err = db.Update(self, func() error { self.ShutdownMode = 
shutdownMode return nil }) @@ -1505,7 +1575,11 @@ func (self *SGuest) StartGuestCreateTask(ctx context.Context, userCred mcclient. self.fixFakeServerInfo(ctx, userCred) return nil } - return self.GetDriver().StartGuestCreateTask(self, ctx, userCred, input.JSON(input), pendingUsage, parentTaskId) + driver, err := self.GetDriver() + if err != nil { + return errors.Wrapf(err, "GetDriver") + } + return driver.StartGuestCreateTask(self, ctx, userCred, input.JSON(input), pendingUsage, parentTaskId) } func (self *SGuest) fixFakeServerInfo(ctx context.Context, userCred mcclient.TokenCredential) { @@ -1553,7 +1627,11 @@ func (self *SGuest) fixFakeServerInfo(ctx context.Context, userCred mcclient.Tok } func (self *SGuest) StartSyncstatus(ctx context.Context, userCred mcclient.TokenCredential, parentTaskId string) error { - return self.GetDriver().StartGuestSyncstatusTask(self, ctx, userCred, parentTaskId) + driver, err := self.GetDriver() + if err != nil { + return errors.Wrapf(err, "GetDriver") + } + return driver.StartGuestSyncstatusTask(self, ctx, userCred, parentTaskId) } func (self *SGuest) StartAutoDeleteGuestTask(ctx context.Context, userCred mcclient.TokenCredential, parentTaskId string) error { @@ -1566,11 +1644,15 @@ func (self *SGuest) StartDeleteGuestTask( ctx context.Context, userCred mcclient.TokenCredential, parentTaskId string, opts api.ServerDeleteInput, ) error { + driver, err := self.GetDriver() + if err != nil { + return errors.Wrapf(err, "GetDriver") + } params := jsonutils.NewDict() params.Add(jsonutils.NewString(self.Status), "guest_status") params.Update(jsonutils.Marshal(opts)) self.SetStatus(ctx, userCred, api.VM_START_DELETE, "") - return self.GetDriver().StartDeleteGuestTask(ctx, userCred, self, params, parentTaskId) + return driver.StartDeleteGuestTask(ctx, userCred, self, params, parentTaskId) } // 清除虚拟机记录(仅数据库操作) @@ -1608,7 +1690,11 @@ func (self *SGuest) PerformRebuildRoot( query jsonutils.JSONObject, input *api.ServerRebuildRootInput, ) 
(*api.SGuest, error) { - input, err := self.GetDriver().ValidateRebuildRoot(ctx, userCred, self, input) + driver, err := self.GetDriver() + if err != nil { + return nil, err + } + input, err = driver.ValidateRebuildRoot(ctx, userCred, self, input) if err != nil { return nil, err } @@ -1626,15 +1712,24 @@ func (self *SGuest) PerformRebuildRoot( if err != nil { return nil, httperrors.NewNotFoundError("failed to find %s", input.ImageId) } - err = self.GetDriver().ValidateImage(ctx, img) + err = driver.ValidateImage(ctx, img) if err != nil { return nil, err } + host, err := self.GetHost() + if err != nil { + return nil, errors.Wrapf(err, "GetHost") + } + + region, err := host.GetRegion() + if err != nil { + return nil, errors.Wrapf(err, "GetRegion") + } + // compare os arch if len(self.InstanceType) > 0 { - provider := GetDriver(self.Hypervisor).GetProvider() - sku, _ := ServerSkuManager.FetchSkuByNameAndProvider(self.InstanceType, provider, true) + sku, _ := ServerSkuManager.FetchSkuByNameAndProvider(self.InstanceType, region.Provider, true) if sku != nil && len(sku.CpuArch) > 0 && len(img.Properties["os_arch"]) > 0 && !strings.Contains(img.Properties["os_arch"], sku.CpuArch) { return nil, httperrors.NewConflictError("root disk image(%s) and sku(%s) architecture mismatch", img.Properties["os_arch"], sku.CpuArch) } @@ -1656,7 +1751,7 @@ func (self *SGuest) PerformRebuildRoot( } templateId := self.GetTemplateId() - if templateId != input.ImageId && len(templateId) > 0 && len(input.ImageId) > 0 && !self.GetDriver().IsRebuildRootSupportChangeUEFI() { + if templateId != input.ImageId && len(templateId) > 0 && len(input.ImageId) > 0 && !driver.IsRebuildRootSupportChangeUEFI() { q := CachedimageManager.Query().In("id", []string{input.ImageId, templateId}) images := []SCachedimage{} err := db.FetchModelObjects(CachedimageManager, q, &images) @@ -1668,17 +1763,17 @@ func (self *SGuest) PerformRebuildRoot( } } - rebuildStatus, err := self.GetDriver().GetRebuildRootStatus() + 
rebuildStatus, err := driver.GetRebuildRootStatus() if err != nil { return nil, httperrors.NewInputParameterError("%v", err) } - if !self.GetDriver().IsRebuildRootSupportChangeImage() && len(input.ImageId) > 0 { + if !driver.IsRebuildRootSupportChangeImage() && len(input.ImageId) > 0 { if len(templateId) == 0 { return nil, httperrors.NewBadRequestError("No template for root disk, cannot rebuild root") } if input.ImageId != templateId { - return nil, httperrors.NewInputParameterError("%s not support rebuild root with a different image", self.GetDriver().GetHypervisor()) + return nil, httperrors.NewInputParameterError("%s not support rebuild root with a different image", driver.GetHypervisor()) } } @@ -1895,15 +1990,21 @@ func (self *SGuest) PerformDetachdisk(ctx context.Context, userCred mcclient.Tok if !attached { return nil, nil } - detachDiskStatus, err := self.GetDriver().GetDetachDiskStatus() + + driver, err := self.GetDriver() if err != nil { return nil, err } - if input.KeepDisk && !self.GetDriver().CanKeepDetachDisk() { + + detachDiskStatus, err := driver.GetDetachDiskStatus() + if err != nil { + return nil, err + } + if input.KeepDisk && !driver.CanKeepDetachDisk() { return nil, httperrors.NewInputParameterError("Cannot keep detached disk") } - err = self.GetDriver().ValidateDetachDisk(ctx, userCred, self, disk) + err = driver.ValidateDetachDisk(ctx, userCred, self, disk) if err != nil { return nil, err } @@ -1935,8 +2036,12 @@ func (self *SGuest) StartGuestDetachdiskTask( return nil }) } + driver, err := self.GetDriver() + if err != nil { + return errors.Wrapf(err, "GetDriver") + } disk.SetStatus(ctx, userCred, api.DISK_DETACHING, "") - return self.GetDriver().StartGuestDetachdiskTask(ctx, userCred, self, taskData, parentTaskId) + return driver.StartGuestDetachdiskTask(ctx, userCred, self, taskData, parentTaskId) } // 卸载透传设备 @@ -2556,7 +2661,12 @@ func (self *SGuest) PerformDetachnetwork( return nil, httperrors.NewInvalidStatusError("Cannot detach network 
in status %s", self.Status) } - err := self.GetDriver().ValidateDetachNetwork(ctx, userCred, self) + driver, err := self.GetDriver() + if err != nil { + return nil, err + } + + err = driver.ValidateDetachNetwork(ctx, userCred, self) if err != nil { if !input.IsForce() { return nil, errors.Wrap(err, "ValidateDetachNetwork") @@ -2915,7 +3025,11 @@ func (self *SGuest) PerformModifySrcCheck(ctx context.Context, userCred mcclient // 调整配置 func (self *SGuest) PerformChangeConfig(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, input api.ServerChangeConfigInput) (jsonutils.JSONObject, error) { - if !self.GetDriver().AllowReconfigGuest() { + driver, err := self.GetDriver() + if err != nil { + return nil, err + } + if !driver.AllowReconfigGuest() { return nil, httperrors.NewInvalidStatusError("Not allow to change config") } @@ -2923,7 +3037,7 @@ func (self *SGuest) PerformChangeConfig(ctx context.Context, userCred mcclient.T return nil, httperrors.NewBadRequestError("Guest have backup not allow to change config") } - changeStatus, err := self.GetDriver().GetChangeConfigStatus(self) + changeStatus, err := driver.GetChangeConfigStatus(self) if err != nil { return nil, httperrors.NewInputParameterError("%v", err) } @@ -2936,13 +3050,13 @@ func (self *SGuest) PerformChangeConfig(ctx context.Context, userCred mcclient.T return nil, httperrors.NewInvalidStatusError("no valid host") } - confs, err := self.GetDriver().ValidateGuestChangeConfigInput(ctx, self, input) + confs, err := driver.ValidateGuestChangeConfigInput(ctx, self, input) if err != nil { return nil, errors.Wrap(err, "ValidateGuestChangeConfigInput") } if self.PowerStates == api.VM_POWER_STATES_ON && (confs.CpuChanged() || confs.MemChanged()) { - confs, err = self.GetDriver().ValidateGuestHotChangeConfigInput(ctx, self, confs) + confs, err = driver.ValidateGuestHotChangeConfigInput(ctx, self, confs) if err != nil { return nil, httperrors.NewInvalidStatusError("cannot change CPU/Memory 
spec in power status %s: %s", self.PowerStates, err) } @@ -3085,9 +3199,13 @@ func (self *SGuest) StartUndeployGuestTask(ctx context.Context, userCred mcclien // 重置虚拟机状态 func (self *SGuest) PerformReset(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, data jsonutils.JSONObject) (jsonutils.JSONObject, error) { + drv, err := self.GetDriver() + if err != nil { + return nil, err + } isHard := jsonutils.QueryBoolean(data, "is_hard", false) if self.Status == api.VM_RUNNING || self.Status == api.VM_STOP_FAILED { - self.GetDriver().StartGuestResetTask(self, ctx, userCred, isHard, "") + drv.StartGuestResetTask(self, ctx, userCred, isHard, "") return nil, nil } return nil, httperrors.NewInvalidStatusError("Cannot reset VM in status %s", self.Status) @@ -3268,10 +3386,13 @@ func (self *SGuest) StartGuestStopAndFreezeTask(ctx context.Context, userCred mc func (self *SGuest) PerformRestart(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, data jsonutils.JSONObject) (jsonutils.JSONObject, error) { isForce := jsonutils.QueryBoolean(data, "is_force", false) if utils.IsInStringArray(self.Status, []string{api.VM_RUNNING, api.VM_STOP_FAILED}) || (isForce && self.Status == api.VM_STOPPING) { - return nil, self.GetDriver().StartGuestRestartTask(self, ctx, userCred, isForce, "") - } else { - return nil, httperrors.NewInvalidStatusError("Cannot do restart server in status %s", self.Status) + driver, err := self.GetDriver() + if err != nil { + return nil, err + } + return nil, driver.StartGuestRestartTask(self, ctx, userCred, isForce, "") } + return nil, httperrors.NewInvalidStatusError("Cannot do restart server in status %s", self.Status) } // 发送远程命令 @@ -3486,7 +3607,12 @@ func (self *SGuest) PerformCreateEip(ctx context.Context, userCred mcclient.Toke } } - err = self.GetDriver().ValidateCreateEip(ctx, userCred, input) + driver, err := self.GetDriver() + if err != nil { + return nil, err + } + + err = 
driver.ValidateCreateEip(ctx, userCred, input) if err != nil { return nil, err } @@ -4086,7 +4212,11 @@ func (self *SGuest) PerformCancelExpire(ctx context.Context, userCred mcclient.T if self.BillingType != billing_api.BILLING_TYPE_POSTPAID { return nil, httperrors.NewBadRequestError("guest billing type %s not support cancel expire", self.BillingType) } - if err := self.GetDriver().CancelExpireTime(ctx, userCred, self); err != nil { + driver, err := self.GetDriver() + if err != nil { + return nil, err + } + if err := driver.CancelExpireTime(ctx, userCred, self); err != nil { return nil, err } disks, err := self.GetDisks() @@ -4107,7 +4237,12 @@ func (self *SGuest) PerformPostpaidExpire(ctx context.Context, userCred mcclient return nil, httperrors.NewBadRequestError("guest billing type is %s", self.BillingType) } - if !self.GetDriver().IsSupportPostpaidExpire() { + driver, err := self.GetDriver() + if err != nil { + return nil, err + } + + if !driver.IsSupportPostpaidExpire() { return nil, httperrors.NewBadRequestError("guest %s unsupport postpaid expire", self.Hypervisor) } @@ -4136,7 +4271,12 @@ func (self *SGuest) PerformRenew(ctx context.Context, userCred mcclient.TokenCre return nil, httperrors.NewInputParameterError("invalid duration %s: %s", durationStr, err) } - if !self.GetDriver().IsSupportedBillingCycle(bc) { + driver, err := self.GetDriver() + if err != nil { + return nil, err + } + + if !driver.IsSupportedBillingCycle(bc) { return nil, httperrors.NewInputParameterError("unsupported duration %s", durationStr) } @@ -4471,9 +4611,14 @@ func (self *SGuest) GetDetailsVirtInstall( vdiListenPort int64 ) + driver, err := self.GetDriver() + if err != nil { + return nil, err + } + host, _ := self.GetHost() if utils.IsInStringArray(self.Status, []string{api.VM_RUNNING, api.VM_BLOCK_STREAM}) { - vncInfo, err := self.GetDriver().GetGuestVncInfo(ctx, userCred, self, host, nil) + vncInfo, err := driver.GetGuestVncInfo(ctx, userCred, self, host, nil) if err != nil { 
log.Errorln(err) return nil, err @@ -5534,7 +5679,11 @@ func (self *SGuest) PerformPublicipToEip(ctx context.Context, userCred mcclient. if !utils.IsInStringArray(self.Status, []string{api.VM_READY, api.VM_RUNNING}) { return nil, httperrors.NewUnsupportOperationError("The guest status need be %s or %s, current is %s", api.VM_READY, api.VM_RUNNING, self.Status) } - if !self.GetDriver().IsSupportPublicipToEip() { + driver, err := self.GetDriver() + if err != nil { + return nil, err + } + if !driver.IsSupportPublicipToEip() { return nil, httperrors.NewUnsupportOperationError("The %s guest not support public ip to eip operation", self.Hypervisor) } return nil, self.StartPublicipToEipTask(ctx, userCred, input.AutoStart, "") @@ -5585,7 +5734,12 @@ func (self *SGuest) PerformSetAutoRenew(ctx context.Context, userCred mcclient.T return nil, nil } - if !self.GetDriver().IsSupportSetAutoRenew() { + driver, err := self.GetDriver() + if err != nil { + return nil, err + } + + if !driver.IsSupportSetAutoRenew() { err := self.SetAutoRenew(input.AutoRenew) if err != nil { return nil, httperrors.NewGeneralError(err) @@ -5636,7 +5790,12 @@ func (self *SGuest) PerformOpenForward(ctx context.Context, userCred mcclient.To return nil, httperrors.NewInputParameterError("guest has no vpc ip") } - resp, err := self.GetDriver().RequestOpenForward(ctx, userCred, self, req) + driver, err := self.GetDriver() + if err != nil { + return nil, errors.Wrapf(err, "GetDriver") + } + + resp, err := driver.RequestOpenForward(ctx, userCred, self, req) if err != nil { logclient.AddActionLogWithContext(ctx, self, logclient.ACT_WEBSSH, err, userCred, false) return nil, httperrors.NewGeneralError(err) @@ -5659,7 +5818,12 @@ func (self *SGuest) PerformCloseForward(ctx context.Context, userCred mcclient.T return nil, httperrors.NewInputParameterError("guest has no vpc ip") } - resp, err := self.GetDriver().RequestCloseForward(ctx, userCred, self, req) + driver, err := self.GetDriver() + if err != nil { + 
return nil, err + } + + resp, err := driver.RequestCloseForward(ctx, userCred, self, req) if err != nil { return nil, httperrors.NewGeneralError(err) } @@ -5683,7 +5847,12 @@ func (self *SGuest) PerformListForward(ctx context.Context, userCred mcclient.To return nil, httperrors.NewInputParameterError("guest has no vpc ip") } - resp, err := self.GetDriver().RequestListForward(ctx, userCred, self, req) + driver, err := self.GetDriver() + if err != nil { + return nil, err + } + + resp, err := driver.RequestListForward(ctx, userCred, self, req) if err != nil { return nil, httperrors.NewGeneralError(err) } @@ -5718,7 +5887,10 @@ func (self *SGuest) PerformChangeStorage(ctx context.Context, userCred mcclient. } // driver validate - drv := self.GetDriver() + drv, err := self.GetDriver() + if err != nil { + return nil, err + } if err := drv.ValidateChangeDiskStorage(ctx, userCred, self, input.TargetStorageId); err != nil { return nil, err } @@ -5779,7 +5951,10 @@ func (self *SGuest) PerformChangeDiskStorage(ctx context.Context, userCred mccli input.TargetStorageId = storage.GetId() // driver validate - drv := self.GetDriver() + drv, err := self.GetDriver() + if err != nil { + return nil, err + } if err := drv.ValidateChangeDiskStorage(ctx, userCred, self, input.TargetStorageId); err != nil { return nil, err } @@ -5811,7 +5986,11 @@ func (self *SGuest) PerformChangeDiskStorage(ctx context.Context, userCred mccli func (self *SGuest) StartChangeDiskStorageTask(ctx context.Context, userCred mcclient.TokenCredential, input *api.ServerChangeDiskStorageInternalInput, parentTaskId string) error { reason := fmt.Sprintf("Change disk %s to storage %s", input.DiskId, input.TargetStorageId) self.SetStatus(ctx, userCred, api.VM_DISK_CHANGE_STORAGE, reason) - return self.GetDriver().StartChangeDiskStorageTask(self, ctx, userCred, input, parentTaskId) + driver, err := self.GetDriver() + if err != nil { + return err + } + return driver.StartChangeDiskStorageTask(self, ctx, userCred, input, 
parentTaskId) } func (self *SGuest) startSwitchToClonedDisk(ctx context.Context, userCred mcclient.TokenCredential, taskId string) error { @@ -5920,7 +6099,11 @@ func (self *SGuest) PerformProbeIsolatedDevices(ctx context.Context, userCred mc if err != nil { return nil, errors.Wrap(err, "GetHost") } - hostDevs, err := host.GetHostDriver().RequestProbeIsolatedDevices(ctx, userCred, host, data) + driver, err := host.GetHostDriver() + if err != nil { + return nil, errors.Wrapf(err, "GetHostDriver") + } + hostDevs, err := driver.RequestProbeIsolatedDevices(ctx, userCred, host, data) if err != nil { return nil, errors.Wrap(err, "RequestProbeIsolatedDevices") } @@ -6004,7 +6187,11 @@ func (self *SGuest) PerformCpusetRemove(ctx context.Context, userCred mcclient.T // TODO: maybe change to async task db.OpsLog.LogEvent(self, db.ACT_GUEST_CPUSET_REMOVE, nil, userCred) resp := new(api.ServerCPUSetRemoveResp) - if err := self.GetDriver().RequestCPUSetRemove(ctx, userCred, host, self, data); err != nil { + drv, err := self.GetDriver() + if err != nil { + return nil, err + } + if err := drv.RequestCPUSetRemove(ctx, userCred, host, self, data); err != nil { db.OpsLog.LogEvent(self, db.ACT_GUEST_CPUSET_REMOVE_FAIL, err, userCred) logclient.AddActionLogWithContext(ctx, self, logclient.ACT_VM_CPUSET_REMOVE, data, userCred, false) resp.Error = err.Error() @@ -6174,7 +6361,11 @@ func (self *SGuest) PerformEnableMemclean(ctx context.Context, userCred mcclient // 设置操作系统信息 func (self *SGuest) PerformSetOsInfo(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, input api.ServerSetOSInfoInput) (jsonutils.JSONObject, error) { - if err := self.GetDriver().ValidateSetOSInfo(ctx, userCred, self, &input); err != nil { + drv, err := self.GetDriver() + if err != nil { + return nil, err + } + if err := drv.ValidateSetOSInfo(ctx, userCred, self, &input); err != nil { return nil, err } @@ -6204,7 +6395,11 @@ func (self *SGuest) PerformSetOsInfo(ctx context.Context, 
userCred mcclient.Toke // 同步操作系统信息 func (self *SGuest) PerformSyncOsInfo(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, data jsonutils.JSONObject) (jsonutils.JSONObject, error) { - if err := self.GetDriver().ValidateSyncOSInfo(ctx, userCred, self); err != nil { + drv, err := self.GetDriver() + if err != nil { + return nil, err + } + if err := drv.ValidateSyncOSInfo(ctx, userCred, self); err != nil { return nil, err } if self.Status == api.VM_READY { diff --git a/pkg/compute/models/guest_convert.go b/pkg/compute/models/guest_convert.go index 401f34f797e..5ea924e2f74 100644 --- a/pkg/compute/models/guest_convert.go +++ b/pkg/compute/models/guest_convert.go @@ -48,14 +48,18 @@ func (self *SGuest) PerformConvertToKvm( if len(self.GetMetadata(ctx, api.SERVER_META_CONVERTED_SERVER, userCred)) > 0 { return nil, httperrors.NewBadRequestError("guest has been converted") } - switch self.Hypervisor { - case api.HYPERVISOR_ESXI: + drv, err := self.GetDriver() + if err != nil { + return nil, err + } + + if drv.GetProvider() == api.CLOUD_PROVIDER_ONECLOUD && drv.GetHypervisor() == api.HYPERVISOR_ESXI { return self.ConvertEsxiToKvm(ctx, userCred, data) - case api.HYPERVISOR_CLOUDPODS: + } + if drv.GetProvider() == api.CLOUD_PROVIDER_CLOUDPODS && drv.GetHypervisor() == api.HYPERVISOR_DEFAULT { return self.ConvertCloudpodsToKvm(ctx, userCred, data) - default: - return nil, httperrors.NewBadRequestError("not support %s", self.Hypervisor) } + return nil, httperrors.NewBadRequestError("not support %s", self.Hypervisor) } func (self *SGuest) ConvertCloudpodsToKvm(ctx context.Context, userCred mcclient.TokenCredential, data *api.ConvertToKvmInput) (jsonutils.JSONObject, error) { diff --git a/pkg/compute/models/guest_secgroups.go b/pkg/compute/models/guest_secgroups.go index 0537f09c231..4f2d4b629ec 100644 --- a/pkg/compute/models/guest_secgroups.go +++ b/pkg/compute/models/guest_secgroups.go @@ -44,7 +44,12 @@ func (self *SGuest) PerformAddSecgroup( 
return nil, httperrors.NewInputParameterError("Cannot add security groups in status %s", self.Status) } - maxCount := self.GetDriver().GetMaxSecurityGroupCount() + drv, err := self.GetDriver() + if err != nil { + return nil, err + } + + maxCount := drv.GetMaxSecurityGroupCount() if maxCount == 0 { return nil, httperrors.NewUnsupportOperationError("Cannot add security groups for hypervisor %s", self.Hypervisor) } @@ -290,7 +295,12 @@ func (self *SGuest) PerformSetSecgroup( return nil, httperrors.NewMissingParameterError("secgroup_ids") } - maxCount := self.GetDriver().GetMaxSecurityGroupCount() + drv, err := self.GetDriver() + if err != nil { + return nil, err + } + + maxCount := drv.GetMaxSecurityGroupCount() if maxCount == 0 { return nil, httperrors.NewUnsupportOperationError("Cannot set security group for this guest %s", self.Name) } diff --git a/pkg/compute/models/guest_template.go b/pkg/compute/models/guest_template.go index a6a6954a146..f93ca645411 100644 --- a/pkg/compute/models/guest_template.go +++ b/pkg/compute/models/guest_template.go @@ -360,8 +360,7 @@ func (gt *SGuestTemplate) getMoreDetails(ctx context.Context, userCred mcclient. 
// sku deal if len(input.InstanceType) > 0 { skuOutput := computeapis.GuestTemplateSku{} - provider := GetDriver(gt.Hypervisor).GetProvider() - sku, err := ServerSkuManager.FetchSkuByNameAndProvider(input.InstanceType, provider, true) + sku, err := ServerSkuManager.FetchSkuByNameAndProvider(input.InstanceType, out.Provider, true) if err != nil { skuOutput.Name = input.InstanceType skuOutput.MemorySizeMb = gt.VmemSize diff --git a/pkg/compute/models/guestdrivers.go b/pkg/compute/models/guestdrivers.go index 61a4cc17237..5a07420e094 100644 --- a/pkg/compute/models/guestdrivers.go +++ b/pkg/compute/models/guestdrivers.go @@ -22,6 +22,7 @@ import ( "yunion.io/x/cloudmux/pkg/cloudprovider" "yunion.io/x/jsonutils" + "yunion.io/x/pkg/errors" "yunion.io/x/pkg/util/billing" "yunion.io/x/pkg/util/rbacscope" @@ -260,16 +261,17 @@ func init() { } func RegisterGuestDriver(driver IGuestDriver) { - guestDrivers[driver.GetHypervisor()] = driver + key := fmt.Sprintf("%s-%s", driver.GetHypervisor(), driver.GetProvider()) + guestDrivers[key] = driver } -func GetDriver(hypervisor string) IGuestDriver { - driver, ok := guestDrivers[hypervisor] +func GetDriver(hypervisor, provider string) (IGuestDriver, error) { + key := fmt.Sprintf("%s-%s", hypervisor, provider) + driver, ok := guestDrivers[key] if ok { - return driver - } else { - panic(fmt.Sprintf("Unsupported hypervisor %q", hypervisor)) + return driver, nil } + return nil, errors.Wrapf(errors.ErrNotFound, "hypervisor: %s provider: %s", hypervisor, provider) } func GetNotSupportAutoRenewHypervisors() []string { diff --git a/pkg/compute/models/guests.go b/pkg/compute/models/guests.go index 5cc6af1fae4..dd817325d16 100644 --- a/pkg/compute/models/guests.go +++ b/pkg/compute/models/guests.go @@ -900,21 +900,50 @@ func (manager *SGuestManager) InitializeData() error { func (guest *SGuest) GetHypervisor() string { if len(guest.Hypervisor) == 0 { return api.HYPERVISOR_DEFAULT - } else { - return guest.Hypervisor } + return 
guest.Hypervisor } func (guest *SGuest) GetHostType() string { - return api.HYPERVISOR_HOSTTYPE[guest.Hypervisor] + host, err := guest.GetHost() + if err != nil { + return "" + } + return host.HostType +} + +func (guest *SGuest) GetRegion() (*SCloudregion, error) { + hosts := HostManager.Query("zone_id").Equals("id", guest.HostId).SubQuery() + zones := ZoneManager.Query("cloudregion_id").In("id", hosts).SubQuery() + q := CloudregionManager.Query().In("id", zones) + ret := &SCloudregion{} + ret.SetModelManager(CloudregionManager, ret) + err := q.First(ret) + if err != nil { + return nil, errors.Wrapf(err, "q.First") + } + return ret, nil +} + +func (guest *SGuest) GetZone() (*SZone, error) { + hosts := HostManager.Query("zone_id").Equals("id", guest.HostId).SubQuery() + q := ZoneManager.Query().In("id", hosts) + ret := &SZone{} + ret.SetModelManager(ZoneManager, ret) + err := q.First(ret) + if err != nil { + return nil, errors.Wrapf(err, "q.First") + } + return ret, nil } -func (guest *SGuest) GetDriver() IGuestDriver { +func (guest *SGuest) GetDriver() (IGuestDriver, error) { hypervisor := guest.GetHypervisor() - if !utils.IsInStringArray(hypervisor, api.HYPERVISORS) { - log.Fatalf("Unsupported hypervisor %s", hypervisor) + region, err := guest.GetRegion() + if err != nil { + return nil, errors.Wrapf(err, "GetRegion") } - return GetDriver(hypervisor) + return GetDriver(hypervisor, region.Provider) } func (guest *SGuest) validateDeleteCondition(ctx context.Context, isPurge bool) error { @@ -1232,7 +1261,11 @@ func (guest *SGuest) SetHostIdWithBackup(userCred mcclient.TokenCredential, mast } func (guest *SGuest) ValidateResizeDisk(disk *SDisk, storage *SStorage) error { - return guest.GetDriver().ValidateResizeDisk(guest, disk, storage) + drv, err := guest.GetDriver() + if err != nil { + return err + } + return drv.ValidateResizeDisk(guest, disk, storage) } func ValidateMemData(vmemSize int, driver IGuestDriver) (int, error) { @@ -1253,13 +1286,15 @@ func 
ValidateCpuData(vcpuCount int, driver IGuestDriver) (int, error) { return vcpuCount, nil } -func ValidateMemCpuData(vmemSize, vcpuCount int, hypervisor string) (int, int, error) { +func ValidateMemCpuData(vmemSize, vcpuCount int, hypervisor, provider string) (int, int, error) { if len(hypervisor) == 0 { hypervisor = api.HYPERVISOR_DEFAULT } - driver := GetDriver(hypervisor) + driver, err := GetDriver(hypervisor, provider) + if err != nil { + return 0, 0, err + } - var err error vmemSize, err = ValidateMemData(vmemSize, driver) if err != nil { return 0, 0, err @@ -1284,8 +1319,11 @@ func (self *SGuest) ValidateUpdateData(ctx context.Context, userCred mcclient.To } } - var err error - input, err = self.GetDriver().ValidateUpdateData(ctx, self, userCred, input) + drv, err := self.GetDriver() + if err != nil { + return input, err + } + input, err = drv.ValidateUpdateData(ctx, self, userCred, input) if err != nil { return input, errors.Wrap(err, "GetDriver().ValidateUpdateData") } @@ -1297,36 +1335,48 @@ func (self *SGuest) ValidateUpdateData(ctx context.Context, userCred mcclient.To return input, nil } -func serverCreateInput2ComputeQuotaKeys(input api.ServerCreateInput, ownerId mcclient.IIdentityProvider) SComputeResourceKeys { - // input.Hypervisor must be set - brand := guessBrandForHypervisor(input.Hypervisor) - keys := GetDriver(input.Hypervisor).GetComputeQuotaKeys( - rbacscope.ScopeProject, - ownerId, - brand, - ) +func serverCreateInput2ComputeQuotaKeys(input api.ServerCreateInput, ownerId mcclient.IIdentityProvider) (SComputeResourceKeys, error) { + var keys SComputeResourceKeys if len(input.PreferHost) > 0 { - hostObj, _ := HostManager.FetchById(input.PreferHost) + hostObj, err := HostManager.FetchById(input.PreferHost) + if err != nil { + return keys, err + } host := hostObj.(*SHost) - zone, _ := host.GetZone() - keys.ZoneId = zone.Id - keys.RegionId = zone.CloudregionId - } else if len(input.PreferZone) > 0 { - zoneObj, _ := 
ZoneManager.FetchById(input.PreferZone) - zone := zoneObj.(*SZone) - keys.ZoneId = zone.Id - keys.RegionId = zone.CloudregionId - } else if len(input.PreferWire) > 0 { - wireObj, _ := WireManager.FetchById(input.PreferWire) + input.PreferZone = host.ZoneId + keys.ZoneId = host.ZoneId + } + if len(input.PreferWire) > 0 { + wireObj, err := WireManager.FetchById(input.PreferWire) + if err != nil { + return keys, err + } wire := wireObj.(*SWire) - zone, _ := wire.GetZone() + if len(wire.ZoneId) > 0 { + input.PreferZone = wire.ZoneId + keys.ZoneId = wire.ZoneId + } + } + if len(input.PreferZone) > 0 { + zoneObj, err := ZoneManager.FetchById(input.PreferZone) + if err != nil { + return keys, err + } + zone := zoneObj.(*SZone) + input.PreferRegion = zone.CloudregionId keys.ZoneId = zone.Id keys.RegionId = zone.CloudregionId - } else if len(input.PreferRegion) > 0 { - regionObj, _ := CloudregionManager.FetchById(input.PreferRegion) - keys.RegionId = regionObj.GetId() } - return keys + if len(input.PreferRegion) > 0 { + regionObj, err := CloudregionManager.FetchById(input.PreferRegion) + if err != nil { + return keys, err + } + region := regionObj.(*SCloudregion) + keys.RegionId = region.GetId() + keys.Brand = region.Provider + } + return keys, nil } func (manager *SGuestManager) BatchPreValidate( @@ -1506,6 +1556,23 @@ func (manager *SGuestManager) validateCreateData( return nil, errors.Wrap(err, "checkGuestImage") } + if len(input.PreferZone) > 0 && len(input.Provider) == 0 { + zoneObj, err := ZoneManager.FetchById(input.PreferZone) + if err != nil { + return nil, errors.Wrapf(err, "zone fetch by id %s", input.PreferZone) + } + zone := zoneObj.(*SZone) + input.PreferRegion = zone.CloudregionId + } + if len(input.PreferRegion) > 0 && len(input.Provider) == 0 { + regionObj, err := CloudregionManager.FetchById(input.PreferRegion) + if err != nil { + return nil, errors.Wrapf(err, "region fetch by id %s", input.PreferRegion) + } + region := regionObj.(*SCloudregion) + 
input.Provider = region.Provider + } + var hypervisor string // var rootStorageType string var osProf osprofile.SOSProfile @@ -1658,13 +1725,16 @@ func (manager *SGuestManager) validateCreateData( } hypervisor = input.Hypervisor + driver, err := GetDriver(hypervisor, input.Provider) + if err != nil { + return nil, err + } if hypervisor != api.HYPERVISOR_POD { // support sku here var sku *SServerSku skuName := input.InstanceType if len(skuName) > 0 { - provider := GetDriver(input.Hypervisor).GetProvider() - sku, err = ServerSkuManager.FetchSkuByNameAndProvider(skuName, provider, true) + sku, err = ServerSkuManager.FetchSkuByNameAndProvider(skuName, input.Provider, true) if err != nil { return nil, err } @@ -1673,7 +1743,7 @@ func (manager *SGuestManager) validateCreateData( input.VmemSize = sku.MemorySizeMB input.VcpuCount = sku.CpuCoreCount } else { - vmemSize, vcpuCount, err := ValidateMemCpuData(input.VmemSize, input.VcpuCount, input.Hypervisor) + vmemSize, vcpuCount, err := ValidateMemCpuData(input.VmemSize, input.VcpuCount, input.Hypervisor, input.Provider) if err != nil { return nil, err } @@ -1726,10 +1796,10 @@ func (manager *SGuestManager) validateCreateData( if len(defaultStorageType) > 0 { rootDiskConfig.Backend = defaultStorageType } else { - rootDiskConfig.Backend = GetDriver(hypervisor).GetDefaultSysDiskBackend() + rootDiskConfig.Backend = driver.GetDefaultSysDiskBackend() } } - sysMinDiskMB := GetDriver(hypervisor).GetMinimalSysDiskSizeGb() * 1024 + sysMinDiskMB := driver.GetMinimalSysDiskSizeGb() * 1024 if rootDiskConfig.SizeMb != api.DISK_SIZE_AUTOEXTEND && rootDiskConfig.SizeMb < sysMinDiskMB { rootDiskConfig.SizeMb = sysMinDiskMB } @@ -1798,11 +1868,11 @@ func (manager *SGuestManager) validateCreateData( } if input.BillingType == billing_api.BILLING_TYPE_POSTPAID { - if !GetDriver(hypervisor).IsSupportPostpaidExpire() { + if !driver.IsSupportPostpaidExpire() { return nil, httperrors.NewBadRequestError("guest %s unsupport postpaid expire", 
hypervisor) } } else { - if !GetDriver(hypervisor).IsSupportedBillingCycle(billingCycle) { + if !driver.IsSupportedBillingCycle(billingCycle) { return nil, httperrors.NewInputParameterError("unsupported duration %s", input.Duration) } } @@ -1942,7 +2012,7 @@ func (manager *SGuestManager) validateCreateData( input.SecgroupId = options.Options.DefaultSecurityGroupId } - maxSecgrpCount := GetDriver(hypervisor).GetMaxSecurityGroupCount() + maxSecgrpCount := driver.GetMaxSecurityGroupCount() if maxSecgrpCount == 0 { //esxi 不支持安全组 input.SecgroupId = "" input.Secgroups = []string{} @@ -1963,7 +2033,7 @@ func (manager *SGuestManager) validateCreateData( }*/ if input.ResourceType != api.HostResourceTypePrepaidRecycle { - input, err = GetDriver(hypervisor).ValidateCreateData(ctx, userCred, input) + input, err = driver.ValidateCreateData(ctx, userCred, input) if err != nil { return nil, err } @@ -2089,8 +2159,13 @@ func (manager *SGuestManager) ValidateCreateData(ctx context.Context, userCred m func (manager *SGuestManager) validateEip(ctx context.Context, userCred mcclient.TokenCredential, input *api.ServerCreateInput, preferRegionId string, preferManagerId string) error { + driver, err := GetDriver(input.Hypervisor, input.Provider) + if err != nil { + return err + } if input.PublicIpBw > 0 { - if !GetDriver(input.Hypervisor).IsSupportPublicIp() { + + if !driver.IsSupportPublicIp() { return httperrors.NewNotImplementedError("public ip not supported for %s", input.Hypervisor) } if len(input.PublicIpChargeType) == 0 { @@ -2107,7 +2182,7 @@ func (manager *SGuestManager) validateEip(ctx context.Context, userCred mcclient eipStr := input.Eip eipBw := input.EipBw if len(eipStr) > 0 || eipBw > 0 { - if !GetDriver(input.Hypervisor).IsSupportEip() { + if !driver.IsSupportEip() { return httperrors.NewNotImplementedError("eip not supported for %s", input.Hypervisor) } if len(eipStr) > 0 { @@ -2285,7 +2360,7 @@ func getGuestResourceRequirements( //Ebw: eBw * count, Eip: eipCnt * count, 
} - keys := serverCreateInput2ComputeQuotaKeys(input, ownerId) + keys, _ := serverCreateInput2ComputeQuotaKeys(input, ownerId) req.SetKeys(keys) regionReq.SetKeys(keys.SRegionalCloudResourceKeys) return req, regionReq @@ -2329,7 +2404,9 @@ func (guest *SGuest) PostCreate(ctx context.Context, userCred mcclient.TokenCred guest.setUserData(ctx, userCred, userData) } - if guest.GetDriver().GetMaxSecurityGroupCount() > 0 { + provider, _ := data.GetString("provider") + drv, _ := GetDriver(guest.Hypervisor, provider) + if drv != nil && drv.GetMaxSecurityGroupCount() > 0 { secgroups, _ := jsonutils.GetStringArray(data, "secgroups") for _, secgroupId := range secgroups { if secgroupId != guest.SecgrpId { @@ -2510,10 +2587,12 @@ func (self *SGuest) moreExtraInfo( } } - out.CdromSupport, _ = self.GetDriver().IsSupportCdrom(self) - out.FloppySupport, _ = self.GetDriver().IsSupportFloppy(self) - - out.MonitorUrl = self.GetDriver().FetchMonitorUrl(ctx, self) + drv, _ := self.GetDriver() + if drv != nil { + out.CdromSupport, _ = drv.IsSupportCdrom(self) + out.FloppySupport, _ = drv.IsSupportFloppy(self) + out.MonitorUrl = drv.FetchMonitorUrl(ctx, self) + } return out } @@ -3041,7 +3120,11 @@ func (guest *SGuest) SyncAllWithCloudVM(ctx context.Context, userCred mcclient.T } func (g *SGuest) SyncOsInfo(ctx context.Context, userCred mcclient.TokenCredential, extVM cloudprovider.IOSInfo) error { - return g.GetDriver().SyncOsInfo(ctx, userCred, g, extVM) + drv, err := g.GetDriver() + if err != nil { + return err + } + return drv.SyncOsInfo(ctx, userCred, g, extVM) } func (g *SGuest) SyncHostname(ext cloudprovider.ICloudVM) { @@ -3126,7 +3209,8 @@ func (g *SGuest) syncWithCloudVM(ctx context.Context, userCred mcclient.TokenCre if provider.GetFactory().IsSupportPrepaidResources() && !recycle { g.BillingType = extVM.GetBillingType() g.ExpiredAt = extVM.GetExpiredAt() - if g.GetDriver().IsSupportSetAutoRenew() { + drv, _ := g.GetDriver() + if drv != nil && drv.IsSupportSetAutoRenew() { 
g.AutoRenew = extVM.IsAutoRenew() } } @@ -3201,7 +3285,8 @@ func (manager *SGuestManager) newCloudVM(ctx context.Context, userCred mcclient. if expired := extVM.GetExpiredAt(); !expired.IsZero() { guest.ExpiredAt = expired } - if guest.GetDriver().IsSupportSetAutoRenew() { + drv, _ := guest.GetDriver() + if drv != nil && drv.IsSupportSetAutoRenew() { guest.AutoRenew = extVM.IsAutoRenew() } } @@ -3276,7 +3361,8 @@ func (manager *SGuestManager) newCloudVM(ctx context.Context, userCred mcclient. Action: notifyclient.ActionSyncCreate, }) - if guest.GetDriver().GetMaxSecurityGroupCount() == 0 { + drv, _ := guest.GetDriver() + if drv != nil && drv.GetMaxSecurityGroupCount() == 0 { db.Update(&guest, func() error { guest.SecgrpId = "" return nil @@ -4253,7 +4339,10 @@ func (self *SGuest) attach2NetworkDesc( } func (self *SGuest) attach2NamedNetworkDesc(ctx context.Context, userCred mcclient.TokenCredential, host *SHost, netConfig *api.NetworkConfig, pendingUsage quotas.IQuota) ([]SGuestnetwork, error) { - driver := self.GetDriver() + driver, err := self.GetDriver() + if err != nil { + return nil, errors.Wrapf(err, "GetDriver") + } net, nicConfs, allocDir, reuseAddr, err := driver.GetNamedNetworkConfiguration(self, ctx, userCred, host, netConfig) if err != nil { if errors.Cause(err) == sql.ErrNoRows { @@ -4322,7 +4411,10 @@ func (self *SGuest) attach2NamedNetworkDesc(ctx context.Context, userCred mcclie } func (self *SGuest) attach2RandomNetwork(ctx context.Context, userCred mcclient.TokenCredential, host *SHost, netConfig *api.NetworkConfig, pendingUsage quotas.IQuota) ([]SGuestnetwork, error) { - driver := self.GetDriver() + driver, err := self.GetDriver() + if err != nil { + return nil, err + } return driver.Attach2RandomNetwork(self, ctx, userCred, host, netConfig, pendingUsage) } @@ -4442,10 +4534,15 @@ func (self *SGuest) CreateDiskOnStorage(ctx context.Context, userCred mcclient.T } func (self *SGuest) ChooseHostStorage(host *SHost, diskConfig *api.DiskConfig, 
candidate *schedapi.CandidateDisk) (*SStorage, error) { + drv, err := self.GetDriver() + if err != nil { + return nil, err + } + if candidate == nil || len(candidate.StorageIds) == 0 { - return self.GetDriver().ChooseHostStorage(host, self, diskConfig, nil) + return drv.ChooseHostStorage(host, self, diskConfig, nil) } - return self.GetDriver().ChooseHostStorage(host, self, diskConfig, candidate.StorageIds) + return drv.ChooseHostStorage(host, self, diskConfig, candidate.StorageIds) } func (self *SGuest) createDiskOnHost( @@ -4742,6 +4839,9 @@ func (self *SGuest) AllowDeleteItem(ctx context.Context, userCred mcclient.Token // 删除虚拟机 func (self *SGuest) CustomizeDelete(ctx context.Context, userCred mcclient.TokenCredential, query api.ServerDeleteInput, data jsonutils.JSONObject) error { + if len(self.HostId) == 0 { + return self.RealDelete(ctx, userCred) + } return self.StartDeleteGuestTask(ctx, userCred, "", query) } @@ -4801,7 +4901,12 @@ func (self *SGuest) isNeedDoResetPasswd() bool { func (self *SGuest) GetDeployConfigOnHost(ctx context.Context, userCred mcclient.TokenCredential, host *SHost, params *jsonutils.JSONDict) (*jsonutils.JSONDict, error) { config := jsonutils.NewDict() - desc, err := self.GetDriver().GetJsonDescAtHost(ctx, userCred, self, host, params) + drv, err := self.GetDriver() + if err != nil { + return nil, err + } + + desc, err := drv.GetJsonDescAtHost(ctx, userCred, self, host, params) if err != nil { return nil, errors.Wrapf(err, "GetJsonDescAtHost") } @@ -4877,7 +4982,7 @@ func (self *SGuest) GetDeployConfigOnHost(ctx context.Context, userCred mcclient config.Add(jsonutils.NewString(onFinish), "on_finish") if jsonutils.QueryBoolean(params, "deploy_telegraf", false) { - influxdbUrl := self.GetDriver().FetchMonitorUrl(ctx, self) + influxdbUrl := drv.FetchMonitorUrl(ctx, self) config.Add(jsonutils.JSONTrue, "deploy_telegraf") serverDetails, err := self.getDetails(ctx, userCred) if err != nil { @@ -5502,12 +5607,12 @@ func (self *SGuest) 
SaveDeployInfo(ctx context.Context, userCred mcclient.TokenC self.saveOsType(userCred, deployInfo.Os) info["os_name"] = deployInfo.Os } - driver := self.GetDriver() + driver, _ := self.GetDriver() if len(deployInfo.Account) > 0 { info["login_account"] = deployInfo.Account if len(deployInfo.Key) > 0 { info["login_key"] = deployInfo.Key - if len(self.KeypairId) > 0 && !driver.IsSupportdDcryptPasswordFromSecretKey() { // Tencent Cloud does not support simultaneous setting of secret keys and passwords + if len(self.KeypairId) > 0 && (driver != nil && !driver.IsSupportdDcryptPasswordFromSecretKey()) { // Tencent Cloud does not support simultaneous setting of secret keys and passwords info["login_key"], _ = seclib2.EncryptBase64(self.GetKeypairPublicKey(), "") } info["login_key_timestamp"] = timeutils.UtcNow() @@ -5756,7 +5861,11 @@ func (manager *SGuestManager) AutoRenewPrepaidServer(ctx context.Context, userCr return } for i := 0; i < len(guests); i += 1 { - if len(guests[i].ExternalId) > 0 && !guests[i].GetDriver().IsSupportSetAutoRenew() { + drv, err := guests[i].GetDriver() + if err != nil { + continue + } + if len(guests[i].ExternalId) > 0 && !drv.IsSupportSetAutoRenew() { err := guests[i].doExternalSync(ctx, userCred) if err == nil && guests[i].IsValidPrePaid() { continue @@ -5932,7 +6041,11 @@ func (self *SGuest) getSecgroupsBySecgroupExternalIds(externalIds []string) ([]S func (self *SGuest) SyncVMSecgroups(ctx context.Context, userCred mcclient.TokenCredential, externalIds []string) error { // clear secgroup if vm not support security group - if self.GetDriver().GetMaxSecurityGroupCount() == 0 || len(externalIds) == 0 { + drv, err := self.GetDriver() + if err != nil { + return err + } + if drv.GetMaxSecurityGroupCount() == 0 || len(externalIds) == 0 { _, err := db.Update(self, func() error { self.SecgrpId = "" self.AdminSecgrpId = "" @@ -6229,8 +6342,8 @@ func (self *SGuest) ToCreateInput(ctx context.Context, userCred mcclient.TokenCr userInput.KeypairId = 
genInput.KeypairId userInput.EipBw = genInput.EipBw userInput.EipChargeType = genInput.EipChargeType - provider := self.GetDriver() - if provider.IsSupportPublicIp() { + drv, _ := self.GetDriver() + if drv != nil && drv.IsSupportPublicIp() { userInput.PublicIpBw = genInput.PublicIpBw userInput.PublicIpChargeType = genInput.PublicIpChargeType } @@ -6304,7 +6417,8 @@ func (self *SGuest) toCreateInput() *api.ServerCreateInput { r.EipBw = eip.Bandwidth r.EipChargeType = eip.ChargeType case api.EIP_MODE_INSTANCE_PUBLICIP: - if driver := self.GetDriver(); driver.IsSupportPublicIp() { + drv, _ := self.GetDriver() + if drv != nil && drv.IsSupportPublicIp() { r.PublicIpBw = eip.Bandwidth r.PublicIpChargeType = eip.ChargeType } @@ -6575,23 +6689,22 @@ func (guest *SGuest) GetRegionalQuotaKeys() (quotas.IQuotaKeys, error) { return fetchRegionalQuotaKeys(rbacscope.ScopeProject, guest.GetOwnerId(), region, provider), nil } -func (guest *SGuest) GetQuotaKeys() (quotas.IQuotaKeys, error) { - host, _ := guest.GetHost() - if host == nil { - return nil, errors.Wrap(httperrors.ErrInvalidStatus, "no valid host") - } - provider := host.GetCloudprovider() - if provider == nil && len(host.ManagerId) > 0 { - return nil, errors.Wrap(httperrors.ErrInvalidStatus, "no valid manager") - } - zone, _ := host.GetZone() - if zone == nil { - return nil, errors.Wrap(httperrors.ErrInvalidStatus, "no valid zone") +func (guest *SGuest) GetCloudprovider() (*SCloudprovider, error) { + hosts := HostManager.Query("manager_id").Equals("id", guest.HostId).SubQuery() + q := CloudproviderManager.Query().In("id", hosts) + ret := &SCloudprovider{} + ret.SetModelManager(CloudproviderManager, ret) + err := q.First(ret) + if err != nil { + return nil, errors.Wrapf(err, "q.First") } + return ret, nil +} + +func (guest *SGuest) GetQuotaKeys() (quotas.IQuotaKeys, error) { + provider, _ := guest.GetCloudprovider() + zone, _ := guest.GetZone() hypervisor := guest.Hypervisor - if !utils.IsInStringArray(hypervisor, 
api.ONECLOUD_HYPERVISORS) { - hypervisor = "" - } return fetchComputeQuotaKeys( rbacscope.ScopeProject, guest.GetOwnerId(), diff --git a/pkg/compute/models/helper.go b/pkg/compute/models/helper.go index 9d361d02fea..dc387ab376a 100644 --- a/pkg/compute/models/helper.go +++ b/pkg/compute/models/helper.go @@ -84,24 +84,34 @@ func ValidateScheduleCreateData(ctx context.Context, userCred mcclient.TokenCred return nil, httperrors.NewInvalidStatusError("Baremetal %s not enabled", bmName) } - if len(hypervisor) > 0 && hypervisor != api.HOSTTYPE_HYPERVISOR[baremetal.HostType] { + hostDriver, err := baremetal.GetHostDriver() + if err != nil { + return nil, errors.Wrapf(err, "GetHostDriver") + } + + if len(hypervisor) > 0 && hypervisor != hostDriver.GetHypervisor() { return nil, httperrors.NewInputParameterError("cannot run hypervisor %s on specified host with type %s", hypervisor, baremetal.HostType) } if len(hypervisor) == 0 { - hypervisor = api.HOSTTYPE_HYPERVISOR[baremetal.HostType] + hypervisor = hostDriver.GetHypervisor() } if len(hypervisor) == 0 { hypervisor = api.HYPERVISOR_DEFAULT } - _, err = GetDriver(hypervisor).ValidateCreateDataOnHost(ctx, userCred, bmName, baremetal, input) + driver, err := GetDriver(hypervisor, input.Provider) + if err != nil { + return nil, err + } + + _, err = driver.ValidateCreateDataOnHost(ctx, userCred, bmName, baremetal, input) if err != nil { return nil, err } - defaultStorage, err := GetDriver(hypervisor).ChooseHostStorage(baremetal, nil, &api.DiskConfig{}, nil) + defaultStorage, err := driver.ChooseHostStorage(baremetal, nil, &api.DiskConfig{}, nil) if err != nil { return nil, errors.Wrap(err, "ChooseHostStorage") } diff --git a/pkg/compute/models/host_recycle.go b/pkg/compute/models/host_recycle.go index c4f51c7741a..c1e60f15234 100644 --- a/pkg/compute/models/host_recycle.go +++ b/pkg/compute/models/host_recycle.go @@ -731,7 +731,17 @@ func (self *SHost) PerformRenewPrepaidRecycle(ctx context.Context, userCred mccl return nil, 
httperrors.NewInputParameterError("invalid duration %s: %s", durationStr, err) } - if !GetDriver(api.HOSTTYPE_HYPERVISOR[self.HostType]).IsSupportedBillingCycle(bc) { + hostDriver, err := self.GetHostDriver() + if err != nil { + return nil, errors.Wrapf(err, "GetHostDriver") + } + + driver, err := GetDriver(hostDriver.GetHypervisor(), hostDriver.GetProvider()) + if err != nil { + return nil, err + } + + if !driver.IsSupportedBillingCycle(bc) { return nil, httperrors.NewInputParameterError("unsupported duration %s", durationStr) } diff --git a/pkg/compute/models/hostdrivers.go b/pkg/compute/models/hostdrivers.go index 44605659c1a..3401a451036 100644 --- a/pkg/compute/models/hostdrivers.go +++ b/pkg/compute/models/hostdrivers.go @@ -16,9 +16,11 @@ package models import ( "context" + "fmt" "yunion.io/x/jsonutils" - "yunion.io/x/log" + "yunion.io/x/pkg/errors" + "yunion.io/x/pkg/utils" api "yunion.io/x/onecloud/pkg/apis/compute" "yunion.io/x/onecloud/pkg/cloudcommon/db/taskman" @@ -27,12 +29,18 @@ import ( type IHostDriver interface { GetHostType() string + GetProvider() string GetHypervisor() string + RequestBaremetalUnmaintence(ctx context.Context, userCred mcclient.TokenCredential, baremetal *SHost, task taskman.ITask) error + RequestBaremetalMaintence(ctx context.Context, userCred mcclient.TokenCredential, baremetal *SHost, task taskman.ITask) error + RequestSyncBaremetalHostStatus(ctx context.Context, userCred mcclient.TokenCredential, baremetal *SHost, task taskman.ITask) error + RequestSyncBaremetalHostConfig(ctx context.Context, userCred mcclient.TokenCredential, baremetal *SHost, task taskman.ITask) error + CheckAndSetCacheImage(ctx context.Context, userCred mcclient.TokenCredential, host *SHost, storagecache *SStoragecache, task taskman.ITask) error RequestUncacheImage(ctx context.Context, host *SHost, storageCache *SStoragecache, task taskman.ITask) error - ValidateUpdateDisk(ctx context.Context, userCred mcclient.TokenCredential, input api.DiskUpdateInput) 
(api.DiskUpdateInput, error) + ValidateUpdateDisk(ctx context.Context, userCred mcclient.TokenCredential, input *api.DiskUpdateInput) (*api.DiskUpdateInput, error) ValidateResetDisk(ctx context.Context, userCred mcclient.TokenCredential, disk *SDisk, snapshot *SSnapshot, guests []SGuest, input *api.DiskResetInput) (*api.DiskResetInput, error) ValidateDiskSize(storage *SStorage, sizeGb int) error RequestPrepareSaveDiskOnHost(ctx context.Context, host *SHost, disk *SDisk, imageId string, task taskman.ITask) error @@ -78,15 +86,29 @@ func init() { } func RegisterHostDriver(driver IHostDriver) { - hostDrivers[driver.GetHostType()] = driver + key := fmt.Sprintf("%s-%s", driver.GetHostType(), driver.GetProvider()) + hostDrivers[key] = driver } -func GetHostDriver(hostType string) IHostDriver { - driver, ok := hostDrivers[hostType] +func GetHostDriver(hostType, provider string) (IHostDriver, error) { + key := fmt.Sprintf("%s-%s", hostType, provider) + driver, ok := hostDrivers[key] if ok { - return driver - } else { - log.Fatalf("Unsupported hostType %s", hostType) - return nil + return driver, nil + } + return nil, errors.Wrapf(errors.ErrNotFound, "host type: %s provider: %s", hostType, provider) +} + +func Hypervisors2HostTypes(hypervisors []string) []string { + ret := []string{} + for _, driver := range hostDrivers { + if !utils.IsInStringArray(driver.GetHypervisor(), hypervisors) { + continue + } + if utils.IsInStringArray(driver.GetHostType(), ret) { + continue + } + ret = append(ret, driver.GetHostType()) } + return ret } diff --git a/pkg/compute/models/hosts.go b/pkg/compute/models/hosts.go index 486bb9daefe..e3cb4a42d82 100644 --- a/pkg/compute/models/hosts.go +++ b/pkg/compute/models/hosts.go @@ -366,11 +366,7 @@ func (manager *SHostManager) ListItemFilter( hypervisorStr := query.Hypervisor if len(hypervisorStr) > 0 { - hostType, ok := api.HYPERVISOR_HOSTTYPE[hypervisorStr] - if !ok { - return nil, httperrors.NewInputParameterError("not supported hypervisor %s", 
hypervisorStr) - } - q = q.Filter(sqlchemy.Equals(q.Field("host_type"), hostType)) + q = q.Filter(sqlchemy.In(q.Field("host_type"), Hypervisors2HostTypes([]string{query.Hypervisor}))) } usable := (query.Usable != nil && *query.Usable) @@ -733,11 +729,15 @@ func (hh *SHost) GetZone() (*SZone, error) { } func (hh *SHost) GetRegion() (*SCloudregion, error) { - zone, err := hh.GetZone() + zones := ZoneManager.Query("cloudregion_id").Equals("id", hh.ZoneId).SubQuery() + q := CloudregionManager.Query().In("id", zones) + ret := &SCloudregion{} + ret.SetModelManager(CloudregionManager, ret) + err := q.First(ret) if err != nil { return nil, err } - return zone.GetRegion() + return ret, nil } func (hh *SHost) GetCpuCount() int { @@ -1875,11 +1875,15 @@ func (hh *SHost) DeleteBaremetalnetwork(ctx context.Context, userCred mcclient.T } } -func (hh *SHost) GetHostDriver() IHostDriver { - if !utils.IsInStringArray(hh.HostType, api.HOST_TYPES) { - log.Fatalf("Unsupported host type %s", hh.HostType) +func (hh *SHost) GetHostDriver() (IHostDriver, error) { + if len(hh.HostType) == 0 { + hh.HostType = api.HOST_TYPE_DEFAULT + } + region, err := hh.GetRegion() + if err != nil { + return nil, errors.Wrapf(err, "GetRegion") } - return GetHostDriver(hh.HostType) + return GetHostDriver(hh.HostType, region.Provider) } func (manager *SHostManager) getHostsByZoneProvider(zone *SZone, region *SCloudregion, provider *SCloudprovider) ([]SHost, error) { @@ -1949,7 +1953,7 @@ func (manager *SHostManager) SyncHosts(ctx context.Context, userCred mcclient.To } for i := 0; i < len(commondb); i += 1 { if !xor { - err = commondb[i].syncWithCloudHost(ctx, userCred, commonext[i], provider) + err = commondb[i].SyncWithCloudHost(ctx, userCred, commonext[i]) if err != nil { syncResult.UpdateError(err) } @@ -1988,7 +1992,7 @@ func (hh *SHost) syncRemoveCloudHost(ctx context.Context, userCred mcclient.Toke return err } -func (hh *SHost) syncWithCloudHost(ctx context.Context, userCred mcclient.TokenCredential, 
extHost cloudprovider.ICloudHost, provider *SCloudprovider) error { +func (hh *SHost) SyncWithCloudHost(ctx context.Context, userCred mcclient.TokenCredential, extHost cloudprovider.ICloudHost) error { diff, err := db.UpdateWithLock(ctx, hh, func() error { // hh.Name = extHost.GetName() @@ -2010,6 +2014,10 @@ func (hh *SHost) syncWithCloudHost(ctx context.Context, userCred mcclient.TokenC hh.StorageSize = extHost.GetStorageSizeMB() hh.StorageType = extHost.GetStorageType() hh.HostType = extHost.GetHostType() + if hh.HostType == api.HOST_TYPE_BAREMETAL { + hh.IsBaremetal = true + } + hh.StorageInfo = extHost.GetStorageInfo() hh.OvnVersion = extHost.GetOvnVersion() if cpuCmt := extHost.GetCpuCmtbound(); cpuCmt > 0 { @@ -2042,6 +2050,7 @@ func (hh *SHost) syncWithCloudHost(ctx context.Context, userCred mcclient.TokenC db.OpsLog.LogSyncUpdate(hh, diff, userCred) + provider := hh.GetCloudprovider() if provider != nil { SyncCloudDomain(userCred, hh, provider.GetOwnerId()) hh.SyncShareState(ctx, userCred, provider.getAccountShareInfo()) @@ -2239,6 +2248,11 @@ func (manager *SHostManager) NewFromCloudHost(ctx context.Context, userCred mccl host.ZoneId = izone.Id host.HostType = extHost.GetHostType() + if host.HostType == api.HOST_TYPE_BAREMETAL { + host.IsBaremetal = true + } + host.StorageInfo = extHost.GetStorageInfo() + host.OvnVersion = extHost.GetOvnVersion() host.Status = extHost.GetStatus() @@ -4145,8 +4159,7 @@ func (hh *SHost) InitializedGuestStart(ctx context.Context, userCred mcclient.To if err != nil { return err } - task.ScheduleRun(nil) - return nil + return task.ScheduleRun(nil) } func (hh *SHost) InitializedGuestStop(ctx context.Context, userCred mcclient.TokenCredential, guest *SGuest) error { @@ -4186,8 +4199,7 @@ func (hh *SHost) PerformMaintenance(ctx context.Context, userCred mcclient.Token if err != nil { return nil, err } - task.ScheduleRun(nil) - return nil, nil + return nil, task.ScheduleRun(nil) } func (hh *SHost) PerformUnmaintenance(ctx 
context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, data jsonutils.JSONObject) (jsonutils.JSONObject, error) { @@ -4233,10 +4245,9 @@ func (hh *SHost) StartSyncstatus(ctx context.Context, userCred mcclient.TokenCre } task, err := taskman.TaskManager.NewTask(ctx, "BaremetalSyncStatusTask", hh, userCred, nil, parentTaskId, "", nil) if err != nil { - return err + return errors.Wrapf(err, "NewTask") } - task.ScheduleRun(nil) - return nil + return task.ScheduleRun(nil) } func (hh *SHost) PerformOffline(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, input *api.HostOfflineInput) (jsonutils.JSONObject, error) { @@ -5369,9 +5380,9 @@ func (hh *SHost) PerformConvertHypervisor(ctx context.Context, userCred mcclient } else { ownerId = userCred } - driver := GetHostDriver(hostType) - if driver == nil { - return nil, httperrors.NewNotAcceptableError("Unsupport driver type %s", hostType) + driver, err := GetHostDriver(hostType, api.CLOUD_PROVIDER_ONECLOUD) + if err != nil { + return nil, errors.Wrapf(err, "GetHostDriver") } if data.Contains("name") { name, _ := data.GetString("name") @@ -5440,11 +5451,11 @@ func (hh *SHost) PerformUndoConvert(ctx context.Context, userCred mcclient.Token if !utils.IsInStringArray(hh.Status, []string{api.BAREMETAL_READY, api.BAREMETAL_RUNNING}) { return nil, httperrors.NewNotAcceptableError("Cannot unconvert in status %s", hh.Status) } - driver := hh.GetDriverWithDefault() - if driver == nil { - return nil, httperrors.NewNotAcceptableError("Unsupport driver type %s", hh.HostType) + driver, err := hh.GetHostDriver() + if err != nil { + return nil, errors.Wrapf(err, "GetHostDriver") } - err := driver.PrepareUnconvert(hh) + err = driver.PrepareUnconvert(hh) if err != nil { return nil, httperrors.NewNotAcceptableError("%v", err) } @@ -5474,14 +5485,6 @@ func (hh *SHost) PerformUndoConvert(ctx context.Context, userCred mcclient.Token return nil, nil } -func (hh *SHost) 
GetDriverWithDefault() IHostDriver { - hostType := hh.HostType - if len(hostType) == 0 { - hostType = api.HOST_TYPE_DEFAULT - } - return GetHostDriver(hostType) -} - func (hh *SHost) UpdateDiskConfig(userCred mcclient.TokenCredential, layouts []baremetal.Layout) error { bs := hh.GetBaremetalstorage() if bs != nil { @@ -6383,8 +6386,7 @@ func (hh *SHost) startSyncConfig(ctx context.Context, userCred mcclient.TokenCre if err != nil { return err } - task.ScheduleRun(nil) - return nil + return task.ScheduleRun(nil) } func (model *SHost) CustomizeCreate(ctx context.Context, userCred mcclient.TokenCredential, ownerId mcclient.IIdentityProvider, query jsonutils.JSONObject, data jsonutils.JSONObject) error { @@ -6623,7 +6625,11 @@ func (manager *SHostManager) InitializeData() error { } func (hh *SHost) PerformProbeIsolatedDevices(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, data jsonutils.JSONObject) (jsonutils.JSONObject, error) { - return hh.GetHostDriver().RequestProbeIsolatedDevices(ctx, userCred, hh, data) + driver, err := hh.GetHostDriver() + if err != nil { + return nil, errors.Wrapf(err, "GetHostDriver") + } + return driver.RequestProbeIsolatedDevices(ctx, userCred, hh, data) } func (hh *SHost) GetPinnedCpusetCores(ctx context.Context, userCred mcclient.TokenCredential) (map[string][]int, error) { diff --git a/pkg/compute/models/hoststorages.go b/pkg/compute/models/hoststorages.go index efb3619145c..804e4cfe467 100644 --- a/pkg/compute/models/hoststorages.go +++ b/pkg/compute/models/hoststorages.go @@ -157,7 +157,12 @@ func (manager *SHoststorageManager) ValidateCreateData(ctx context.Context, user } host := hostObj.(*SHost) - input, err = host.GetHostDriver().ValidateAttachStorage(ctx, userCred, host, storage, input) + driver, err := host.GetHostDriver() + if err != nil { + return input, errors.Wrapf(err, "GetHostDriver") + } + + input, err = driver.ValidateAttachStorage(ctx, userCred, host, storage, input) if err != nil { 
return input, err } diff --git a/pkg/compute/models/loadbalanceragents_deploy.go b/pkg/compute/models/loadbalanceragents_deploy.go index 813dd6e778a..b7293f27fff 100644 --- a/pkg/compute/models/loadbalanceragents_deploy.go +++ b/pkg/compute/models/loadbalanceragents_deploy.go @@ -263,7 +263,11 @@ func (lbagent *SLoadbalancerAgent) validateHost(ctx context.Context, userCred mc return httperrors.NewNotFoundError("find guest %s: %v", name, err) } guest := obj.(*SGuest) - if utils.IsInStringArray(guest.Hypervisor, compute_apis.PUBLIC_CLOUD_HYPERVISORS) { + region, err := guest.GetRegion() + if err != nil { + return errors.Wrapf(err, "GetRegion") + } + if utils.IsInStringArray(region.Provider, compute_apis.PUBLIC_CLOUD_PROVIDERS) { return httperrors.NewBadRequestError("lbagent cannot be deployed on public guests") } if guest.Status != compute_apis.VM_RUNNING { diff --git a/pkg/compute/models/qemu_guest_agent.go b/pkg/compute/models/qemu_guest_agent.go index d6c64576ad8..f56d07d88e9 100644 --- a/pkg/compute/models/qemu_guest_agent.go +++ b/pkg/compute/models/qemu_guest_agent.go @@ -80,8 +80,15 @@ func (self *SGuest) PerformQgaPing( } res := jsonutils.NewDict() - host, _ := self.GetHost() - err := self.GetDriver().QgaRequestGuestPing(ctx, mcclient.GetTokenHeaders(userCred), host, self, false, input) + host, err := self.GetHost() + if err != nil { + return nil, err + } + drv, err := self.GetDriver() + if err != nil { + return nil, err + } + err = drv.QgaRequestGuestPing(ctx, mcclient.GetTokenHeaders(userCred), host, self, false, input) if err != nil { res.Set("ping_error", jsonutils.NewString(err.Error())) } @@ -100,8 +107,15 @@ func (self *SGuest) PerformQgaCommand( if input.Command == "" { return nil, httperrors.NewMissingParameterError("command") } - host, _ := self.GetHost() - return self.GetDriver().RequestQgaCommand(ctx, userCred, jsonutils.Marshal(input), host, self) + host, err := self.GetHost() + if err != nil { + return nil, err + } + drv, err := self.GetDriver() 
+ if err != nil { + return nil, err + } + return drv.RequestQgaCommand(ctx, userCred, jsonutils.Marshal(input), host, self) } func (self *SGuest) PerformQgaGuestInfoTask( @@ -113,8 +127,15 @@ func (self *SGuest) PerformQgaGuestInfoTask( if self.PowerStates != api.VM_POWER_STATES_ON { return nil, httperrors.NewBadRequestError("can't use qga in vm status: %s", self.Status) } - host, _ := self.GetHost() - return self.GetDriver().QgaRequestGuestInfoTask(ctx, userCred, nil, host, self) + host, err := self.GetHost() + if err != nil { + return nil, err + } + drv, err := self.GetDriver() + if err != nil { + return nil, err + } + return drv.QgaRequestGuestInfoTask(ctx, userCred, nil, host, self) } func (self *SGuest) PerformQgaGetNetwork( @@ -126,8 +147,15 @@ func (self *SGuest) PerformQgaGetNetwork( if self.PowerStates != api.VM_POWER_STATES_ON { return nil, httperrors.NewBadRequestError("can't use qga in vm status: %s", self.Status) } - host, _ := self.GetHost() - return self.GetDriver().QgaRequestGetNetwork(ctx, userCred, nil, host, self) + host, err := self.GetHost() + if err != nil { + return nil, err + } + drv, err := self.GetDriver() + if err != nil { + return nil, err + } + return drv.QgaRequestGetNetwork(ctx, userCred, nil, host, self) } func (self *SGuest) startQgaSyncOsInfoTask(ctx context.Context, userCred mcclient.TokenCredential, parentTaskId string) error { diff --git a/pkg/compute/models/storagecaches.go b/pkg/compute/models/storagecaches.go index 110843fcac2..bde09963ad6 100644 --- a/pkg/compute/models/storagecaches.go +++ b/pkg/compute/models/storagecaches.go @@ -754,7 +754,14 @@ func (self *SStoragecache) IsReachCapacityLimit(imageId string) bool { } } host, _ := self.GetMasterHost() - return host.GetHostDriver().IsReachStoragecacheCapacityLimit(host, cachedImages) + if host == nil { + return false + } + driver, _ := host.GetHostDriver() + if driver == nil { + return false + } + return driver.IsReachStoragecacheCapacityLimit(host, cachedImages) } func 
(self *SStoragecache) GetStoragecachedimages() ([]SStoragecachedimage, error) { diff --git a/pkg/compute/models/storages.go b/pkg/compute/models/storages.go index b9c9d96a3cc..22208123bef 100644 --- a/pkg/compute/models/storages.go +++ b/pkg/compute/models/storages.go @@ -221,6 +221,28 @@ func (self *SStorage) IsNeedDeleteStoragecache() (bool, error) { return cnt == 0, nil } +func (manager *SStorageManager) GetStorageTypesByProvider(provider string) ([]string, error) { + q := manager.Query("storage_type") + providers := CloudproviderManager.Query().SubQuery() + q = q.Join(providers, sqlchemy.Equals(q.Field("manager_id"), providers.Field("id"))). + Filter(sqlchemy.Equals(providers.Field("provider"), provider)).Distinct() + storages := []string{} + rows, err := q.Rows() + if err != nil { + return nil, err + } + defer rows.Close() + for rows.Next() { + var storage string + err = rows.Scan(&storage) + if err != nil { + return nil, errors.Wrap(err, "rows.Scan(&storage)") + } + storages = append(storages, storage) + } + return storages, nil +} + func (manager *SStorageManager) GetStorageTypesByHostType(hostType string) ([]string, error) { q := manager.Query("storage_type") hosts := HostManager.Query().SubQuery() diff --git a/pkg/compute/storagedrivers/base.go b/pkg/compute/storagedrivers/base.go index 9e96d3584fd..47b4215c87a 100644 --- a/pkg/compute/storagedrivers/base.go +++ b/pkg/compute/storagedrivers/base.go @@ -97,11 +97,16 @@ func (self *SBaseStorageDriver) RequestDeleteSnapshot(ctx context.Context, snaps } } + drv, err := guest.GetDriver() + if err != nil { + return err + } + if jsonutils.QueryBoolean(task.GetParams(), "reload_disk", false) && snapshot.OutOfChain { guest.SetStatus(ctx, task.GetUserCred(), api.VM_SNAPSHOT, "Start Reload Snapshot") params := jsonutils.NewDict() params.Set("disk_id", jsonutils.NewString(snapshot.DiskId)) - return guest.GetDriver().RequestReloadDiskSnapshot(ctx, guest, task, params) + return drv.RequestReloadDiskSnapshot(ctx, guest, 
task, params) } else { convertSnapshot, err := models.SnapshotManager.GetConvertSnapshot(snapshot) if err != nil && err != sql.ErrNoRows { @@ -121,7 +126,7 @@ func (self *SBaseStorageDriver) RequestDeleteSnapshot(ctx context.Context, snaps params.Set("auto_deleted", jsonutils.JSONTrue) } guest.SetStatus(ctx, task.GetUserCred(), api.VM_SNAPSHOT_DELETE, "Start Delete Snapshot") - return guest.GetDriver().RequestDeleteSnapshot(ctx, guest, task, params) + return drv.RequestDeleteSnapshot(ctx, guest, task, params) } } diff --git a/pkg/compute/tasks/baremetal_convert_hypervisor_task.go b/pkg/compute/tasks/baremetal_convert_hypervisor_task.go index c78dc1aa5f8..65cdf9adef2 100644 --- a/pkg/compute/tasks/baremetal_convert_hypervisor_task.go +++ b/pkg/compute/tasks/baremetal_convert_hypervisor_task.go @@ -72,11 +72,17 @@ func (self *BaremetalConvertHypervisorTask) OnGuestDeployComplete(ctx context.Co guest := self.getGuest() hypervisor := self.getHypervisor() - driver := models.GetHostDriver(hypervisor) - if driver == nil { - self.SetStageFailed(ctx, jsonutils.NewString(fmt.Sprintf("Get Host Driver error %s", hypervisor))) + region, err := baremetal.GetRegion() + if err != nil { + self.SetStageFailed(ctx, jsonutils.NewString(fmt.Sprintf("Get Host region error %v", err))) + return + } + driver, err := models.GetHostDriver(hypervisor, region.Provider) + if err != nil { + self.SetStageFailed(ctx, jsonutils.NewString(fmt.Sprintf("Get Host Driver error %v", err))) + return } - err := driver.FinishConvert(ctx, self.UserCred, baremetal, guest, driver.GetHostType()) + err = driver.FinishConvert(ctx, self.UserCred, baremetal, guest, driver.GetHostType()) if err != nil { log.Errorln(err) logclient.AddActionLogWithStartable(self, baremetal, logclient.ACT_BM_CONVERT_HYPER, fmt.Sprintf("convert deploy falied %s", err.Error()), self.UserCred, false) @@ -97,7 +103,12 @@ func (self *BaremetalConvertHypervisorTask) OnGuestDeployCompleteFailed(ctx cont func (self 
*BaremetalConvertHypervisorTask) OnGuestDeleteComplete(ctx context.Context, baremetal *models.SHost, body jsonutils.JSONObject) { hypervisor := self.getHypervisor() - driver := models.GetHostDriver(hypervisor) + region, err := baremetal.GetRegion() + if err != nil { + self.SetStageFailed(ctx, jsonutils.NewString(fmt.Sprintf("Get Host region error %v", err))) + return + } + driver, _ := models.GetHostDriver(hypervisor, region.Provider) if driver != nil { driver.ConvertFailed(baremetal) } diff --git a/pkg/compute/tasks/baremetal_maintenance_task.go b/pkg/compute/tasks/baremetal_maintenance_task.go index 5e2cc73bc3c..a4d9f537b42 100644 --- a/pkg/compute/tasks/baremetal_maintenance_task.go +++ b/pkg/compute/tasks/baremetal_maintenance_task.go @@ -16,7 +16,6 @@ package tasks import ( "context" - "fmt" "yunion.io/x/jsonutils" @@ -37,12 +36,16 @@ func init() { func (self *BaremetalMaintenanceTask) OnInit(ctx context.Context, obj db.IStandaloneModel, body jsonutils.JSONObject) { baremetal := obj.(*models.SHost) - url := fmt.Sprintf("/baremetals/%s/maintenance", baremetal.Id) - headers := self.GetTaskRequestHeader() self.SetStage("OnEnterMaintenantModeSucc", nil) - _, err := baremetal.BaremetalSyncRequest(ctx, "POST", url, headers, self.Params) + drv, err := baremetal.GetHostDriver() if err != nil { self.OnEnterMaintenantModeSuccFailed(ctx, baremetal, jsonutils.NewString(err.Error())) + return + } + err = drv.RequestBaremetalMaintence(ctx, self.GetUserCred(), baremetal, self) + if err != nil { + self.OnEnterMaintenantModeSuccFailed(ctx, baremetal, jsonutils.NewString(err.Error())) + return } baremetal.SetStatus(ctx, self.UserCred, api.BAREMETAL_MAINTAINING, "") } diff --git a/pkg/compute/tasks/baremetal_sync_config_task.go b/pkg/compute/tasks/baremetal_sync_config_task.go index a9412e27a84..fddd7b4bee2 100644 --- a/pkg/compute/tasks/baremetal_sync_config_task.go +++ b/pkg/compute/tasks/baremetal_sync_config_task.go @@ -16,7 +16,6 @@ package tasks import ( "context" - "fmt" 
"yunion.io/x/jsonutils" @@ -45,9 +44,12 @@ func (self *BaremetalSyncConfigTask) OnInit(ctx context.Context, obj db.IStandal func (self *BaremetalSyncConfigTask) DoSyncConfig(ctx context.Context, baremetal *models.SHost) { self.SetStage("OnSyncConfigComplete", nil) - url := fmt.Sprintf("/baremetals/%s/sync-config", baremetal.Id) - headers := self.GetTaskRequestHeader() - _, err := baremetal.BaremetalSyncRequest(ctx, "POST", url, headers, nil) + drv, err := baremetal.GetHostDriver() + if err != nil { + self.SetStageFailed(ctx, jsonutils.NewString(err.Error())) + return + } + err = drv.RequestSyncBaremetalHostConfig(ctx, self.GetUserCred(), baremetal, self) if err != nil { self.SetStageFailed(ctx, jsonutils.NewString(err.Error())) } diff --git a/pkg/compute/tasks/baremetal_sync_status_task.go b/pkg/compute/tasks/baremetal_sync_status_task.go index d8aa66b7373..a9b1bb80ca3 100644 --- a/pkg/compute/tasks/baremetal_sync_status_task.go +++ b/pkg/compute/tasks/baremetal_sync_status_task.go @@ -16,11 +16,12 @@ package tasks import ( "context" - "fmt" "yunion.io/x/jsonutils" "yunion.io/x/log" + "yunion.io/x/pkg/errors" + "yunion.io/x/onecloud/pkg/apis" api "yunion.io/x/onecloud/pkg/apis/compute" "yunion.io/x/onecloud/pkg/cloudcommon/db" "yunion.io/x/onecloud/pkg/cloudcommon/db/lockman" @@ -33,22 +34,30 @@ type BaremetalSyncStatusTask struct { SBaremetalBaseTask } +func (self *BaremetalSyncStatusTask) taskFailed(ctx context.Context, baremetal *models.SHost, err error) { + baremetal.SetStatus(ctx, self.GetUserCred(), apis.STATUS_UNKNOWN, err.Error()) + self.SetStageFailed(ctx, jsonutils.NewString(err.Error())) +} + func (self *BaremetalSyncStatusTask) OnInit(ctx context.Context, obj db.IStandaloneModel, body jsonutils.JSONObject) { baremetal := obj.(*models.SHost) if baremetal.IsBaremetal { self.DoSyncStatus(ctx, baremetal) - } else { - self.SetStageComplete(ctx, nil) + return } + self.SetStageComplete(ctx, nil) } func (self *BaremetalSyncStatusTask) DoSyncStatus(ctx 
context.Context, baremetal *models.SHost) { + drv, err := baremetal.GetHostDriver() + if err != nil { + self.taskFailed(ctx, baremetal, errors.Wrapf(err, "GetHostDriver")) + return + } self.SetStage("OnSyncstatusComplete", nil) - url := fmt.Sprintf("/baremetals/%s/syncstatus", baremetal.Id) - headers := self.GetTaskRequestHeader() - _, err := baremetal.BaremetalSyncRequest(ctx, "POST", url, headers, nil) + err = drv.RequestSyncBaremetalHostStatus(ctx, self.GetUserCred(), baremetal, self) if err != nil { - self.SetStageFailed(ctx, jsonutils.NewString(err.Error())) + self.taskFailed(ctx, baremetal, errors.Wrapf(err, "RequestSyncBaremetalHostStatus")) return } } @@ -58,7 +67,7 @@ func (self *BaremetalSyncStatusTask) OnSyncstatusComplete(ctx context.Context, b } func (self *BaremetalSyncStatusTask) OnSyncstatusCompleteFailed(ctx context.Context, baremetal *models.SHost, body jsonutils.JSONObject) { - self.SetStageFailed(ctx, body) + self.taskFailed(ctx, baremetal, errors.Errorf("%s", body.String())) } type BaremetalSyncAllGuestsStatusTask struct { diff --git a/pkg/compute/tasks/baremetal_unconvert_hypervisor_task.go b/pkg/compute/tasks/baremetal_unconvert_hypervisor_task.go index 0edd379bab6..7305dfbc482 100644 --- a/pkg/compute/tasks/baremetal_unconvert_hypervisor_task.go +++ b/pkg/compute/tasks/baremetal_unconvert_hypervisor_task.go @@ -61,8 +61,12 @@ func (self *BaremetalUnconvertHypervisorTask) OnInit(ctx context.Context, obj db func (self *BaremetalUnconvertHypervisorTask) OnGuestDeleteComplete(ctx context.Context, baremetal *models.SHost, body jsonutils.JSONObject) { db.OpsLog.LogEvent(baremetal, db.ACT_UNCONVERT_COMPLETE, "", self.UserCred) - driver := baremetal.GetDriverWithDefault() - err := driver.FinishUnconvert(ctx, self.UserCred, baremetal) + driver, err := baremetal.GetHostDriver() + if err != nil { + self.SetStageFailed(ctx, jsonutils.NewString(errors.Wrapf(err, "GetHostDriver").Error())) + return + } + err = driver.FinishUnconvert(ctx, self.UserCred, baremetal) if err != nil 
{ log.Errorf("Fail to exec finish_unconvert: %s", err.Error()) } diff --git a/pkg/compute/tasks/baremetal_unmaintenance_task.go b/pkg/compute/tasks/baremetal_unmaintenance_task.go index b8359ad5c95..5e878f85b53 100644 --- a/pkg/compute/tasks/baremetal_unmaintenance_task.go +++ b/pkg/compute/tasks/baremetal_unmaintenance_task.go @@ -16,7 +16,6 @@ package tasks import ( "context" - "fmt" "yunion.io/x/jsonutils" @@ -36,20 +35,23 @@ func init() { func (self *BaremetalUnmaintenanceTask) OnInit(ctx context.Context, obj db.IStandaloneModel, body jsonutils.JSONObject) { baremetal := obj.(*models.SHost) - url := fmt.Sprintf("/baremetals/%s/unmaintenance", baremetal.Id) - headers := self.GetTaskRequestHeader() - self.SetStage("OnUnmaintenantComplete", nil) + drv, err := baremetal.GetHostDriver() + if err != nil { + self.SetStageFailed(ctx, jsonutils.NewString(err.Error())) + return + } action := self.Action() - _, err := baremetal.BaremetalSyncRequest(ctx, "POST", url, headers, self.Params) + self.SetStage("OnUnmaintenantComplete", nil) + err = drv.RequestBaremetalUnmaintence(ctx, self.GetUserCred(), baremetal, self) if err != nil { if len(action) > 0 { logclient.AddActionLogWithStartable(self, baremetal, action, err, self.UserCred, false) } self.SetStageFailed(ctx, jsonutils.NewString(err.Error())) - } else { - if len(action) > 0 { - logclient.AddActionLogWithStartable(self, baremetal, action, "", self.UserCred, true) - } + return + } + if len(action) > 0 { + logclient.AddActionLogWithStartable(self, baremetal, action, "", self.UserCred, true) } } diff --git a/pkg/compute/tasks/container_create_task.go b/pkg/compute/tasks/container_create_task.go index e2568cbd49d..6590e27ffed 100644 --- a/pkg/compute/tasks/container_create_task.go +++ b/pkg/compute/tasks/container_create_task.go @@ -40,7 +40,11 @@ func (t *ContainerBaseTask) GetPod() *models.SGuest { } func (t *ContainerBaseTask) GetPodDriver() models.IPodDriver { - return t.GetPod().GetDriver().(models.IPodDriver) + drv, 
err := t.GetPod().GetDriver() + if err != nil { + return nil + } + return drv.(models.IPodDriver) } type ContainerCreateTask struct { diff --git a/pkg/compute/tasks/disk_delete_task.go b/pkg/compute/tasks/disk_delete_task.go index ce6813ceeb1..c5bad763420 100644 --- a/pkg/compute/tasks/disk_delete_task.go +++ b/pkg/compute/tasks/disk_delete_task.go @@ -146,7 +146,12 @@ func (self *DiskDeleteTask) startDeleteDisk(ctx context.Context, disk *models.SD self.OnGuestDiskDeleteCompleteFailed(ctx, disk, jsonutils.NewString("fail to find master host")) return } - err = host.GetHostDriver().RequestDeallocateDiskOnHost(ctx, host, storage, disk, false, self) + driver, err := host.GetHostDriver() + if err != nil { + self.OnGuestDiskDeleteCompleteFailed(ctx, disk, jsonutils.NewString(errors.Wrapf(err, "GetHostDriver").Error())) + return + } + err = driver.RequestDeallocateDiskOnHost(ctx, host, storage, disk, false, self) if err != nil { self.OnGuestDiskDeleteCompleteFailed(ctx, disk, jsonutils.NewString(err.Error())) return @@ -161,7 +166,12 @@ func (self *DiskDeleteTask) OnMasterStorageDeleteDiskComplete(ctx context.Contex self.OnGuestDiskDeleteCompleteFailed(ctx, disk, jsonutils.NewString(fmt.Sprintf("backup storage %s fail to find master host", disk.BackupStorageId))) return } - err = host.GetHostDriver().RequestDeallocateDiskOnHost(ctx, host, storage, disk, false, self) + driver, err := host.GetHostDriver() + if err != nil { + self.OnGuestDiskDeleteCompleteFailed(ctx, disk, jsonutils.NewString(err.Error())) + return + } + err = driver.RequestDeallocateDiskOnHost(ctx, host, storage, disk, false, self) if err != nil { self.OnGuestDiskDeleteCompleteFailed(ctx, disk, jsonutils.NewString(err.Error())) } diff --git a/pkg/compute/tasks/disk_migrate_task.go b/pkg/compute/tasks/disk_migrate_task.go index 8efb7ca2d67..b05e08eeb5f 100644 --- a/pkg/compute/tasks/disk_migrate_task.go +++ b/pkg/compute/tasks/disk_migrate_task.go @@ -149,7 +149,12 @@ func (task *DiskMigrateTask) 
OnStorageCacheImage(ctx context.Context, disk *mode task.TaskFailed(ctx, disk, jsonutils.NewString(err.Error())) return } - ret, err := sourceHost.GetHostDriver().RequestDiskSrcMigratePrepare(ctx, sourceHost, disk, task) + driver, err := sourceHost.GetHostDriver() + if err != nil { + task.TaskFailed(ctx, disk, jsonutils.NewString(err.Error())) + return + } + ret, err := driver.RequestDiskSrcMigratePrepare(ctx, sourceHost, disk, task) if err != nil { task.TaskFailed(ctx, disk, jsonutils.NewString(err.Error())) return @@ -193,7 +198,12 @@ func (task *DiskMigrateTask) OnStorageCacheImage(ctx context.Context, disk *mode targetStorage := models.StorageManager.FetchStorageById(targetStorageId) task.SetStage("OnDiskMigrate", nil) - if err = targetHost.GetHostDriver().RequestDiskMigrate(ctx, targetHost, targetStorage, disk, task, body); err != nil { + targetDriver, err := targetHost.GetHostDriver() + if err != nil { + task.TaskFailed(ctx, disk, jsonutils.NewString(errors.Wrap(err, "GetHostDriver").Error())) + return + } + if err = targetDriver.RequestDiskMigrate(ctx, targetHost, targetStorage, disk, task, body); err != nil { task.TaskFailed(ctx, disk, jsonutils.NewString(fmt.Sprintf("failed request disk migrate %s", err))) return } @@ -230,7 +240,12 @@ func (task *DiskMigrateTask) OnDiskMigrate(ctx context.Context, disk *models.SDi } task.SetStage("OnDeallocateSourceDisk", nil) - err = srcHost.GetHostDriver().RequestDeallocateDiskOnHost(ctx, srcHost, srcStorage, disk, true, task) + driver, err := srcHost.GetHostDriver() + if err != nil { + task.TaskFailed(ctx, disk, jsonutils.NewString(fmt.Sprintf("GetHostDriver: %v", err))) + return + } + err = driver.RequestDeallocateDiskOnHost(ctx, srcHost, srcStorage, disk, true, task) if err != nil { task.TaskFailed(ctx, disk, jsonutils.NewString(fmt.Sprintf("failed deallocate disk on src storage %s", err))) return diff --git a/pkg/compute/tasks/disk_reset_task.go b/pkg/compute/tasks/disk_reset_task.go index 1e5d187225e..018a33b1a0f 
100644 --- a/pkg/compute/tasks/disk_reset_task.go +++ b/pkg/compute/tasks/disk_reset_task.go @@ -124,7 +124,12 @@ func (self *DiskResetTask) RequestResetDisk(ctx context.Context, disk *models.SD params := snapshot.GetRegionDriver().GetDiskResetParams(snapshot) self.SetStage("OnRequestResetDisk", nil) - err = host.GetHostDriver().RequestResetDisk(ctx, host, disk, params, self) + driver, err := host.GetHostDriver() + if err != nil { + self.TaskFailed(ctx, disk, errors.Wrap(err, "GetHostDriver")) + return + } + err = driver.RequestResetDisk(ctx, host, disk, params, self) if err != nil { self.TaskFailed(ctx, disk, errors.Wrap(err, "RequestResetDisk")) } @@ -172,7 +177,12 @@ func (self *DiskCleanUpSnapshotsTask) StartCleanUpSnapshots(ctx context.Context, return } self.SetStage("OnCleanUpSnapshots", nil) - err := host.GetHostDriver().RequestCleanUpDiskSnapshots(ctx, host, disk, self.Params, self) + driver, err := host.GetHostDriver() + if err != nil { + self.SetStageFailed(ctx, jsonutils.NewString(errors.Wrapf(err, "GetHostDriver").Error())) + return + } + err = driver.RequestCleanUpDiskSnapshots(ctx, host, disk, self.Params, self) if err != nil { self.SetStageFailed(ctx, jsonutils.NewString(err.Error())) } diff --git a/pkg/compute/tasks/disk_resize_task.go b/pkg/compute/tasks/disk_resize_task.go index 4c8f694d015..2c981c8d9db 100644 --- a/pkg/compute/tasks/disk_resize_task.go +++ b/pkg/compute/tasks/disk_resize_task.go @@ -75,7 +75,12 @@ func (self *DiskResizeTask) StartResizeDisk(ctx context.Context, host *models.SH log.Infof("Resizing disk on host %s ...", host.GetName()) self.SetStage("OnDiskResizeComplete", nil) sizeMb, _ := self.GetParams().Int("size") - if err := host.GetHostDriver().RequestResizeDiskOnHost(ctx, host, storage, disk, sizeMb, self); err != nil { + driver, err := host.GetHostDriver() + if err != nil { + self.OnStartResizeDiskFailed(ctx, disk, err) + return + } + if err := driver.RequestResizeDiskOnHost(ctx, host, storage, disk, sizeMb, self); err != 
nil { log.Errorf("request_resize_disk_on_host: %v", err) self.OnStartResizeDiskFailed(ctx, disk, err) return diff --git a/pkg/compute/tasks/disk_save_task.go b/pkg/compute/tasks/disk_save_task.go index 5e7ef419993..ca8c8e8d123 100644 --- a/pkg/compute/tasks/disk_save_task.go +++ b/pkg/compute/tasks/disk_save_task.go @@ -80,7 +80,12 @@ func (self *DiskSaveTask) StartBackupDisk(ctx context.Context, disk *models.SDis self.SetStage("OnDiskBackupComplete", nil) disk.SetStatus(ctx, self.GetUserCred(), api.DISK_SAVING, "") imageId, _ := self.GetParams().GetString("image_id") - err := host.GetHostDriver().RequestPrepareSaveDiskOnHost(ctx, host, disk, imageId, self) + driver, err := host.GetHostDriver() + if err != nil { + self.taskFailed(ctx, disk, errors.Wrapf(err, "GetHostDriver")) + return + } + err = driver.RequestPrepareSaveDiskOnHost(ctx, host, disk, imageId, self) if err != nil { self.taskFailed(ctx, disk, errors.Wrapf(err, "RequestPrepareSaveDiskOnHost")) return @@ -122,8 +127,12 @@ func (self *DiskSaveTask) RefreshImageCache(ctx context.Context, imageId string) } func (self *DiskSaveTask) UploadDisk(ctx context.Context, host *models.SHost, disk *models.SDisk, imageId string, data *jsonutils.JSONDict) error { + driver, err := host.GetHostDriver() + if err != nil { + return errors.Wrapf(err, "GetHostDriver") + } self.SetStage("OnUploadDiskComplete", nil) - return host.GetHostDriver().RequestSaveUploadImageOnHost(ctx, host, disk, imageId, self, jsonutils.Marshal(data)) + return driver.RequestSaveUploadImageOnHost(ctx, host, disk, imageId, self, jsonutils.Marshal(data)) } func (self *DiskSaveTask) OnUploadDiskComplete(ctx context.Context, disk *models.SDisk, data jsonutils.JSONObject) { diff --git a/pkg/compute/tasks/guest_attach_disk_task.go b/pkg/compute/tasks/guest_attach_disk_task.go index 0a37260a1d4..b0160029c4b 100644 --- a/pkg/compute/tasks/guest_attach_disk_task.go +++ b/pkg/compute/tasks/guest_attach_disk_task.go @@ -67,7 +67,12 @@ func (self 
*GuestAttachDiskTask) OnInit(ctx context.Context, obj db.IStandaloneM } disk.SetStatus(ctx, self.UserCred, api.DISK_ATTACHING, "Disk attach") self.SetStage("OnSyncConfigComplete", nil) - guest.GetDriver().RequestAttachDisk(ctx, guest, disk, self) + drv, err := guest.GetDriver() + if err != nil { + self.OnTaskFail(ctx, guest, disk, jsonutils.NewString("GetDriver")) + return + } + drv.RequestAttachDisk(ctx, guest, disk, self) } func (self *GuestAttachDiskTask) OnSyncConfigComplete(ctx context.Context, guest *models.SGuest, data jsonutils.JSONObject) { diff --git a/pkg/compute/tasks/guest_backup_tasks.go b/pkg/compute/tasks/guest_backup_tasks.go index fbfe9db2c92..7dd767c0827 100644 --- a/pkg/compute/tasks/guest_backup_tasks.go +++ b/pkg/compute/tasks/guest_backup_tasks.go @@ -44,7 +44,12 @@ func (task *GuestSwitchToBackupTask) OnInit(ctx context.Context, obj db.IStandal host, _ := guest.GetHost() task.Params.Set("is_force", jsonutils.JSONTrue) task.SetStage("OnEnsureMasterGuestStoped", nil) - err := guest.GetDriver().RequestStopOnHost(ctx, guest, host, task, true) + drv, err := guest.GetDriver() + if err != nil { + task.OnEnsureMasterGuestStoped(ctx, guest, nil) + return + } + err = drv.RequestStopOnHost(ctx, guest, host, task, true) if err != nil { // In case of master host crash task.OnEnsureMasterGuestStoped(ctx, guest, nil) @@ -55,7 +59,12 @@ func (task *GuestSwitchToBackupTask) OnEnsureMasterGuestStoped(ctx context.Conte backupHost := models.HostManager.FetchHostById(guest.BackupHostId) task.Params.Set("is_force", jsonutils.JSONTrue) task.SetStage("OnBackupGuestStoped", nil) - err := guest.GetDriver().RequestStopOnHost(ctx, guest, backupHost, task, true) + drv, err := guest.GetDriver() + if err != nil { + task.OnFail(ctx, guest, jsonutils.NewString(err.Error())) + return + } + err = drv.RequestStopOnHost(ctx, guest, backupHost, task, true) if err != nil { task.OnFail(ctx, guest, jsonutils.NewString(err.Error())) } @@ -138,7 +147,12 @@ func (task 
*GuestStartAndSyncToBackupTask) OnInit(ctx context.Context, obj db.IS func (task *GuestStartAndSyncToBackupTask) checkTemplete(ctx context.Context, guest *models.SGuest) { diskCat := guest.CategorizeDisks() if diskCat.Root != nil && len(diskCat.Root.GetTemplateId()) > 0 { - err := guest.GetDriver().CheckDiskTemplateOnStorage(ctx, task.UserCred, diskCat.Root.GetTemplateId(), diskCat.Root.DiskFormat, + drv, err := guest.GetDriver() + if err != nil { + task.SetStageFailed(ctx, jsonutils.NewString(err.Error())) + return + } + err = drv.CheckDiskTemplateOnStorage(ctx, task.UserCred, diskCat.Root.GetTemplateId(), diskCat.Root.DiskFormat, diskCat.Root.BackupStorageId, task) if err != nil { task.SetStageFailed(ctx, jsonutils.NewString(err.Error())) @@ -160,7 +174,12 @@ func (task *GuestStartAndSyncToBackupTask) OnCheckTemplete(ctx context.Context, } else { task.Params.Set("block_ready", jsonutils.JSONTrue) } - err := guest.GetDriver().RequestStartOnHost(ctx, guest, host, task.UserCred, task) + drv, err := guest.GetDriver() + if err != nil { + task.SetStageFailed(ctx, jsonutils.NewString(err.Error())) + return + } + err = drv.RequestStartOnHost(ctx, guest, host, task.UserCred, task) if err != nil { task.SetStageFailed(ctx, jsonutils.NewString(err.Error())) } @@ -186,7 +205,12 @@ func (task *GuestStartAndSyncToBackupTask) OnStartBackupGuest(ctx context.Contex if utils.IsInStringArray(guestStatus, api.VM_RUNNING_STATUS) { task.SetStage("OnRequestSyncToBackup", nil) - err = guest.GetDriver().RequestSyncToBackup(ctx, guest, task) + drv, err := guest.GetDriver() + if err != nil { + task.SetStageFailed(ctx, jsonutils.NewString(err.Error())) + return + } + err = drv.RequestSyncToBackup(ctx, guest, task) if err != nil { task.SetStageFailed(ctx, jsonutils.NewString(err.Error())) } @@ -203,7 +227,14 @@ func (task *GuestStartAndSyncToBackupTask) OnStartBackupGuestFailed(ctx context. 
func (task *GuestStartAndSyncToBackupTask) OnRequestSyncToBackup(ctx context.Context, guest *models.SGuest, data jsonutils.JSONObject) { guest.SetGuestBackupMirrorJobInProgress(ctx, task.UserCred) - err := guest.GetDriver().RequestSlaveBlockStreamDisks(ctx, guest, task) + drv, err := guest.GetDriver() + if err != nil { + guest.SetGuestBackupMirrorJobFailed(ctx, task.UserCred) + guest.SetBackupGuestStatus(task.UserCred, api.VM_BLOCK_STREAM_FAIL, err.Error()) + task.SetStageFailed(ctx, jsonutils.NewString(err.Error())) + return + } + err = drv.RequestSlaveBlockStreamDisks(ctx, guest, task) if err != nil { guest.SetGuestBackupMirrorJobFailed(ctx, task.UserCred) guest.SetBackupGuestStatus(task.UserCred, api.VM_BLOCK_STREAM_FAIL, err.Error()) diff --git a/pkg/compute/tasks/guest_batch_create_task.go b/pkg/compute/tasks/guest_batch_create_task.go index c91a2b668f5..88027eb7217 100644 --- a/pkg/compute/tasks/guest_batch_create_task.go +++ b/pkg/compute/tasks/guest_batch_create_task.go @@ -210,8 +210,14 @@ func (task *GuestBatchCreateTask) allocateGuestOnHost(ctx context.Context, guest input.Eip = eip.Id } + drv, err := guest.GetDriver() + if err != nil { + guest.SetStatus(ctx, task.UserCred, api.VM_DISK_FAILED, err.Error()) + return err + } + // allocate disks - extraDisks, err := guest.GetDriver().PrepareDiskRaidConfig(task.UserCred, host, input.BaremetalDiskConfigs, input.Disks) + extraDisks, err := drv.PrepareDiskRaidConfig(task.UserCred, host, input.BaremetalDiskConfigs, input.Disks) if err != nil { log.Errorf("PrepareDiskRaidConfig fail: %s", err) guest.SetStatus(ctx, task.UserCred, api.VM_DISK_FAILED, err.Error()) diff --git a/pkg/compute/tasks/guest_change_config_task.go b/pkg/compute/tasks/guest_change_config_task.go index 24231702da0..aab836740d3 100644 --- a/pkg/compute/tasks/guest_change_config_task.go +++ b/pkg/compute/tasks/guest_change_config_task.go @@ -211,7 +211,12 @@ func (task *GuestChangeConfigTask) OnCreateDisksComplete(ctx context.Context, ob } func 
(task *GuestChangeConfigTask) startGuestChangeCpuMemSpec(ctx context.Context, guest *models.SGuest, instanceType string, vcpuCount, cpuSockets int, vmemSize int) { - err := guest.GetDriver().RequestChangeVmConfig(ctx, guest, task, instanceType, int64(vcpuCount), int64(cpuSockets), int64(vmemSize)) + drv, err := guest.GetDriver() + if err != nil { + task.markStageFailed(ctx, guest, jsonutils.NewString(err.Error())) + return + } + err = drv.RequestChangeVmConfig(ctx, guest, task, instanceType, int64(vcpuCount), int64(cpuSockets), int64(vmemSize)) if err != nil { task.markStageFailed(ctx, guest, jsonutils.NewString(err.Error())) return @@ -313,7 +318,12 @@ func (task *GuestChangeConfigTask) OnGuestChangeCpuMemSpecComplete(ctx context.C } func (task *GuestChangeConfigTask) OnGuestChangeCpuMemSpecCompleteFailed(ctx context.Context, guest *models.SGuest, data jsonutils.JSONObject) { - if err := guest.GetDriver().OnGuestChangeCpuMemFailed(ctx, guest, data.(*jsonutils.JSONDict), task); err != nil { + drv, err := guest.GetDriver() + if err != nil { + task.markStageFailed(ctx, guest, data) + return + } + if err := drv.OnGuestChangeCpuMemFailed(ctx, guest, data.(*jsonutils.JSONDict), task); err != nil { log.Errorln(err) } task.markStageFailed(ctx, guest, data) @@ -333,7 +343,12 @@ func (task *GuestChangeConfigTask) OnGuestChangeCpuMemSpecFinish(ctx context.Con // resetTraffics := []api.ServerNicTrafficLimit{} // task.Params.Unmarshal(&resetTraffics, "reset_traffic_limits") task.SetStage("OnGuestResetNicTraffics", nil) - err := guest.GetDriver().RequestResetNicTrafficLimit(ctx, task, host, guest, confs.ResetTrafficLimits) + drv, err := guest.GetDriver() + if err != nil { + task.markStageFailed(ctx, guest, jsonutils.NewString(err.Error())) + return + } + err = drv.RequestResetNicTrafficLimit(ctx, task, host, guest, confs.ResetTrafficLimits) if err != nil { task.markStageFailed(ctx, guest, jsonutils.NewString(err.Error())) return @@ -372,7 +387,12 @@ func (task 
*GuestChangeConfigTask) OnGuestResetNicTraffics(ctx context.Context, host, _ := guest.GetHost() setTraffics := confs.SetTrafficLimits task.SetStage("OnGuestSetNicTraffics", nil) - err := guest.GetDriver().RequestSetNicTrafficLimit(ctx, task, host, guest, setTraffics) + drv, err := guest.GetDriver() + if err != nil { + task.markStageFailed(ctx, guest, jsonutils.NewString(err.Error())) + return + } + err = drv.RequestSetNicTrafficLimit(ctx, task, host, guest, setTraffics) if err != nil { task.markStageFailed(ctx, guest, jsonutils.NewString(err.Error())) return diff --git a/pkg/compute/tasks/guest_change_disk_storage_task.go b/pkg/compute/tasks/guest_change_disk_storage_task.go index 86e84bf3b41..b53b4b8da69 100644 --- a/pkg/compute/tasks/guest_change_disk_storage_task.go +++ b/pkg/compute/tasks/guest_change_disk_storage_task.go @@ -97,7 +97,12 @@ func (t *GuestChangeDiskStorageTask) ChangeDiskStorage(ctx context.Context, gues } // create target disk - if err := guest.GetDriver().RequestChangeDiskStorage(ctx, t.GetUserCred(), guest, input, t); err != nil { + drv, err := guest.GetDriver() + if err != nil { + t.TaskFailed(ctx, guest, jsonutils.NewString(fmt.Sprintf("GetDriver: %s", err))) + return + } + if err := drv.RequestChangeDiskStorage(ctx, t.GetUserCred(), guest, input, t); err != nil { t.TaskFailed(ctx, guest, jsonutils.NewString(fmt.Sprintf("RequestChangeDiskStorage: %s", err))) return } @@ -155,7 +160,12 @@ func (t *GuestChangeDiskStorageTask) OnDiskLiveChangeStorageReady( t.SetStage("OnDiskChangeStorageComplete", nil) // block job ready, start switch to target storage disk - err = guest.GetDriver().RequestSwitchToTargetStorageDisk(ctx, t.UserCred, guest, input, t) + drv, err := guest.GetDriver() + if err != nil { + t.TaskFailed(ctx, guest, jsonutils.NewString(fmt.Sprintf("GetDriver: %s", err))) + return + } + err = drv.RequestSwitchToTargetStorageDisk(ctx, t.UserCred, guest, input, t) if err != nil { t.TaskFailed(ctx, guest, 
jsonutils.NewString(fmt.Sprintf("OnDiskLiveChangeStorageReady: %s", err))) return @@ -272,7 +282,12 @@ func (t *GuestChangeDiskStorageTask) attachTargetDisk(ctx context.Context, guest attachData := jsonutils.Marshal(confData).(*jsonutils.JSONDict) attachData.Add(jsonutils.NewString(targetDisk.GetId()), "disk_id") - return guest.GetDriver().StartGuestAttachDiskTask(ctx, t.GetUserCred(), guest, attachData, t.GetTaskId()) + drv, err := guest.GetDriver() + if err != nil { + return err + } + + return drv.StartGuestAttachDiskTask(ctx, t.GetUserCred(), guest, attachData, t.GetTaskId()) } func (t *GuestChangeDiskStorageTask) OnTargetDiskAttachComplete(ctx context.Context, guest *models.SGuest, data jsonutils.JSONObject) { diff --git a/pkg/compute/tasks/guest_cpuset_task.go b/pkg/compute/tasks/guest_cpuset_task.go index 07820474bda..408bb80b060 100644 --- a/pkg/compute/tasks/guest_cpuset_task.go +++ b/pkg/compute/tasks/guest_cpuset_task.go @@ -53,7 +53,12 @@ func (self *GuestCPUSetTask) OnSyncComplete(ctx context.Context, obj *models.SGu host, _ := obj.GetHost() input := new(api.ServerCPUSetInput) self.GetParams().Unmarshal(input) - _, err := obj.GetDriver().RequestCPUSet(ctx, self.GetUserCred(), host, obj, input) + drv, err := obj.GetDriver() + if err != nil { + self.setStageFailed(ctx, obj, jsonutils.NewString(err.Error())) + return + } + _, err = drv.RequestCPUSet(ctx, self.GetUserCred(), host, obj, input) if err != nil { self.setStageFailed(ctx, obj, jsonutils.NewString(err.Error())) return diff --git a/pkg/compute/tasks/guest_create_disk_task.go b/pkg/compute/tasks/guest_create_disk_task.go index bbf36f0f83e..29c5f7701e3 100644 --- a/pkg/compute/tasks/guest_create_disk_task.go +++ b/pkg/compute/tasks/guest_create_disk_task.go @@ -37,7 +37,12 @@ type GuestCreateDiskTask struct { func (self *GuestCreateDiskTask) OnInit(ctx context.Context, obj db.IStandaloneModel, data jsonutils.JSONObject) { self.SetStage("OnDiskPrepared", nil) guest := obj.(*models.SGuest) - err := 
guest.GetDriver().DoGuestCreateDisksTask(ctx, guest, self) + drv, err := guest.GetDriver() + if err != nil { + self.SetStageFailed(ctx, jsonutils.NewString(err.Error())) + return + } + err = drv.DoGuestCreateDisksTask(ctx, guest, self) if err != nil { self.SetStageFailed(ctx, jsonutils.NewString(err.Error())) } diff --git a/pkg/compute/tasks/guest_create_task.go b/pkg/compute/tasks/guest_create_task.go index da53097bb5c..f2838367f44 100644 --- a/pkg/compute/tasks/guest_create_task.go +++ b/pkg/compute/tasks/guest_create_task.go @@ -62,7 +62,12 @@ func (self *GuestCreateTask) OnWaitGuestNetworksReady(ctx context.Context, obj d func (self *GuestCreateTask) OnGuestNetworkReady(ctx context.Context, guest *models.SGuest) { guest.SetStatus(ctx, self.UserCred, api.VM_CREATE_DISK, "") self.SetStage("OnDiskPrepared", nil) - err := guest.GetDriver().RequestGuestCreateAllDisks(ctx, guest, self) + drv, err := guest.GetDriver() + if err != nil { + self.OnDiskPreparedFailed(ctx, guest, jsonutils.NewString(err.Error())) + return + } + err = drv.RequestGuestCreateAllDisks(ctx, guest, self) if err != nil { msg := fmt.Sprintf("unable to RequestGuestCreateAllDisks: %v", err) self.OnDiskPreparedFailed(ctx, guest, jsonutils.NewString(msg)) @@ -103,6 +108,7 @@ func (self *GuestCreateTask) OnDiskPrepared(ctx context.Context, guest *models.S self.OnSecurityGroupPreparedFailed(ctx, guest, jsonutils.NewString(errors.Wrapf(err, "GetRegion").Error())) return } + self.SetStage("OnSecurityGroupPrepared", nil) err = region.GetDriver().RequestPrepareSecurityGroups(ctx, self.UserCred, guest.GetOwnerId(), secgroups, vpc, func(ids []string) error { return guest.SaveSecgroups(ctx, self.UserCred, ids) @@ -149,7 +155,12 @@ func (self *GuestCreateTask) OnSecurityGroupPrepared(ctx context.Context, guest guest.SetAllMetadata(ctx, imagePros, self.UserCred) } self.SetStage("OnCdromPrepared", nil) - guest.GetDriver().RequestGuestCreateInsertIso(ctx, cdrom, bootIndex, self, guest) + drv, err := 
guest.GetDriver() + if err != nil { + self.OnCdromPreparedFailed(ctx, guest, jsonutils.NewString(err.Error())) + return + } + drv.RequestGuestCreateInsertIso(ctx, cdrom, bootIndex, self, guest) } else { self.OnCdromPrepared(ctx, guest, data) } diff --git a/pkg/compute/tasks/guest_delete_on_host_task.go b/pkg/compute/tasks/guest_delete_on_host_task.go index 3388fe5e2f8..291a10a1758 100644 --- a/pkg/compute/tasks/guest_delete_on_host_task.go +++ b/pkg/compute/tasks/guest_delete_on_host_task.go @@ -50,7 +50,12 @@ func (self *GuestDeleteOnHostTask) OnInit(ctx context.Context, obj db.IStandalon self.SetStage("OnStopGuest", nil) self.Params.Set("is_force", jsonutils.JSONTrue) - if err := guest.GetDriver().RequestStopOnHost(ctx, guest, host, self, true); err != nil { + drv, err := guest.GetDriver() + if err != nil { + self.OnFail(ctx, guest, jsonutils.NewString(err.Error())) + return + } + if err := drv.RequestStopOnHost(ctx, guest, host, self, true); err != nil { log.Errorf("RequestStopGuestForDelete fail %s", err) self.OnStopGuest(ctx, guest, nil) } diff --git a/pkg/compute/tasks/guest_delete_task.go b/pkg/compute/tasks/guest_delete_task.go index 6bfe52f65ba..d583a27e3f3 100644 --- a/pkg/compute/tasks/guest_delete_task.go +++ b/pkg/compute/tasks/guest_delete_task.go @@ -53,15 +53,20 @@ func (self *GuestDeleteTask) OnInit(ctx context.Context, obj db.IStandaloneModel self.OnGuestStopComplete(ctx, guest, data) return } + drv, err := guest.GetDriver() + if err != nil { + self.OnGuestStopComplete(ctx, guest, data) + return + } if len(guest.BackupHostId) > 0 { self.SetStage("OnMasterHostStopGuestComplete", nil) - if err := guest.GetDriver().RequestStopGuestForDelete(ctx, guest, nil, self); err != nil { + if err := drv.RequestStopGuestForDelete(ctx, guest, nil, self); err != nil { log.Errorf("RequestStopGuestForDelete fail %s", err) self.OnMasterHostStopGuestComplete(ctx, guest, nil) } } else { self.SetStage("OnGuestStopComplete", nil) - if err := 
guest.GetDriver().RequestStopGuestForDelete(ctx, guest, nil, self); err != nil { + if err := drv.RequestStopGuestForDelete(ctx, guest, nil, self); err != nil { log.Errorf("RequestStopGuestForDelete fail %s", err) self.OnGuestStopComplete(ctx, guest, nil) } @@ -71,7 +76,12 @@ func (self *GuestDeleteTask) OnInit(ctx context.Context, obj db.IStandaloneModel func (self *GuestDeleteTask) OnMasterHostStopGuestComplete(ctx context.Context, guest *models.SGuest, data jsonutils.JSONObject) { self.SetStage("OnGuestStopComplete", nil) host := models.HostManager.FetchHostById(guest.BackupHostId) - err := guest.GetDriver().RequestStopGuestForDelete(ctx, guest, host, self) + drv, err := guest.GetDriver() + if err != nil { + self.OnGuestStopComplete(ctx, guest, nil) + return + } + err = drv.RequestStopGuestForDelete(ctx, guest, host, self) if err != nil { log.Errorf("RequestStopGuestForDelete fail %s", err) self.OnGuestStopComplete(ctx, guest, nil) @@ -287,7 +297,12 @@ func (self *GuestDeleteTask) StartDeleteGuest(ctx context.Context, guest *models } // No snapshot self.SetStage("OnGuestDetachDisksComplete", nil) - guest.GetDriver().RequestDetachDisksFromGuestForDelete(ctx, guest, self) + drv, err := guest.GetDriver() + if err != nil { + self.OnGuestDeleteFailed(ctx, guest, jsonutils.NewString(err.Error())) + return + } + drv.RequestDetachDisksFromGuestForDelete(ctx, guest, self) } func (self *GuestDeleteTask) OnGuestDetachDisksComplete(ctx context.Context, obj db.IStandaloneModel, data jsonutils.JSONObject) { @@ -342,7 +357,10 @@ func (self *GuestDeleteTask) OnGuestDeleteComplete(ctx context.Context, obj db.I guest.EjectAllIso(self.UserCred) guest.EjectAllVfd(self.UserCred) guest.DeleteEip(ctx, self.UserCred) - guest.GetDriver().OnDeleteGuestFinalCleanup(ctx, guest, self.UserCred) + drv, _ := guest.GetDriver() + if drv != nil { + drv.OnDeleteGuestFinalCleanup(ctx, guest, self.UserCred) + } self.DeleteGuest(ctx, guest) } diff --git a/pkg/compute/tasks/guest_deploy_task.go 
b/pkg/compute/tasks/guest_deploy_task.go index e42082034db..e817369835b 100644 --- a/pkg/compute/tasks/guest_deploy_task.go +++ b/pkg/compute/tasks/guest_deploy_task.go @@ -65,7 +65,12 @@ func (self *GuestDeployTask) OnDeployWaitServerStop(ctx context.Context, guest * } func (self *GuestDeployTask) DeployOnHost(ctx context.Context, guest *models.SGuest, host *models.SHost) { - err := guest.GetDriver().RequestDeployGuestOnHost(ctx, guest, host, self) + drv, err := guest.GetDriver() + if err != nil { + self.OnDeployGuestFail(ctx, guest, err) + return + } + err = drv.RequestDeployGuestOnHost(ctx, guest, host, self) if err != nil { log.Errorf("request_deploy_guest_on_host %s", err) self.OnDeployGuestFail(ctx, guest, err) @@ -84,7 +89,12 @@ func (self *GuestDeployTask) OnDeployGuestFail(ctx context.Context, guest *model func (self *GuestDeployTask) OnDeployGuestComplete(ctx context.Context, obj db.IStandaloneModel, data jsonutils.JSONObject) { log.Infof("on_guest_deploy_task_data_received %s", data) guest := obj.(*models.SGuest) - guest.GetDriver().OnGuestDeployTaskDataReceived(ctx, guest, self, data) + drv, err := guest.GetDriver() + if err != nil { + self.OnDeployGuestCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) + return + } + drv.OnGuestDeployTaskDataReceived(ctx, guest, self, data) action, _ := self.Params.GetString("deploy_action") keypair, _ := self.Params.GetString("keypair") reset_password := jsonutils.QueryBoolean(self.Params, "reset_password", false) diff --git a/pkg/compute/tasks/guest_detach_disk_task.go b/pkg/compute/tasks/guest_detach_disk_task.go index 7604c5924b8..a002fb3babb 100644 --- a/pkg/compute/tasks/guest_detach_disk_task.go +++ b/pkg/compute/tasks/guest_detach_disk_task.go @@ -68,8 +68,14 @@ func (self *GuestDetachDiskTask) OnInit(ctx context.Context, obj db.IStandaloneM return } + drv, err := guest.GetDriver() + if err != nil { + self.OnDetachDiskCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) + return + } + 
self.SetStage("OnDetachDiskComplete", nil) - err = guest.GetDriver().RequestDetachDisk(ctx, guest, disk, self) + err = drv.RequestDetachDisk(ctx, guest, disk, self) if err != nil { self.OnDetachDiskCompleteFailed(ctx, guest, jsonutils.Marshal(map[string]string{"error": err.Error()})) } @@ -114,7 +120,12 @@ func (self *GuestDetachDiskTask) OnDetachDiskComplete(ctx context.Context, guest if cnt == 0 { self.SetStage("OnDiskDeleteComplete", nil) db.OpsLog.LogEvent(disk, db.ACT_DELETE, "", self.UserCred) - err := guest.GetDriver().RequestDeleteDetachedDisk(ctx, disk, self, purge) + drv, err := guest.GetDriver() + if err != nil { + self.OnTaskFail(ctx, guest, disk, jsonutils.NewString(err.Error())) + return + } + err = drv.RequestDeleteDetachedDisk(ctx, disk, self, purge) if err != nil { self.OnTaskFail(ctx, guest, disk, jsonutils.NewString(err.Error())) } diff --git a/pkg/compute/tasks/guest_disk_snapshot_task.go b/pkg/compute/tasks/guest_disk_snapshot_task.go index 2a081755c02..bc2dfcaec53 100644 --- a/pkg/compute/tasks/guest_disk_snapshot_task.go +++ b/pkg/compute/tasks/guest_disk_snapshot_task.go @@ -57,7 +57,12 @@ func (self *GuestDiskSnapshotTask) DoDiskSnapshot(ctx context.Context, guest *mo params.Set("guest_old_status", jsonutils.NewString(guest.Status)) self.SetStage("OnDiskSnapshotComplete", params) guest.SetStatus(ctx, self.UserCred, api.VM_SNAPSHOT, "") - err = guest.GetDriver().RequestDiskSnapshot(ctx, guest, self, snapshotId, diskId) + drv, err := guest.GetDriver() + if err != nil { + self.TaskFailed(ctx, guest, jsonutils.NewString(err.Error())) + return + } + err = drv.RequestDiskSnapshot(ctx, guest, self, snapshotId, diskId) if err != nil { self.TaskFailed(ctx, guest, jsonutils.NewString(err.Error())) return diff --git a/pkg/compute/tasks/guest_eject_iso_task.go b/pkg/compute/tasks/guest_eject_iso_task.go index 552624f6123..095c3068698 100644 --- a/pkg/compute/tasks/guest_eject_iso_task.go +++ b/pkg/compute/tasks/guest_eject_iso_task.go @@ -42,7 +42,12 
@@ func (self *GuestEjectISOTask) startEjectIso(ctx context.Context, obj db.IStanda cdromOrdinal, _ := self.Params.Int("cdrom_ordinal") if guest.EjectIso(cdromOrdinal, self.UserCred) && guest.Status == api.VM_RUNNING { self.SetStage("OnConfigSyncComplete", nil) - guest.GetDriver().RequestGuestHotRemoveIso(ctx, guest, self) + drv, err := guest.GetDriver() + if err != nil { + self.SetStageFailed(ctx, jsonutils.NewString(err.Error())) + return + } + drv.RequestGuestHotRemoveIso(ctx, guest, self) } else { self.SetStageComplete(ctx, nil) } diff --git a/pkg/compute/tasks/guest_eject_vfd_task.go b/pkg/compute/tasks/guest_eject_vfd_task.go index 70095b48e49..483bad41595 100644 --- a/pkg/compute/tasks/guest_eject_vfd_task.go +++ b/pkg/compute/tasks/guest_eject_vfd_task.go @@ -42,7 +42,12 @@ func (self *GuestEjectVFDTask) startEjectVfd(ctx context.Context, obj db.IStanda floppyOrdinal, _ := self.Params.Int("floppy_ordinal") if guest.EjectVfd(floppyOrdinal, self.UserCred) && guest.Status == api.VM_RUNNING { self.SetStage("OnConfigSyncComplete", nil) - guest.GetDriver().RequestGuestHotRemoveVfd(ctx, guest, self) + drv, err := guest.GetDriver() + if err != nil { + self.SetStageFailed(ctx, jsonutils.Marshal(map[string]string{"reason": err.Error()})) + return + } + drv.RequestGuestHotRemoveVfd(ctx, guest, self) } else { self.SetStageComplete(ctx, nil) } diff --git a/pkg/compute/tasks/guest_insert_iso_task.go b/pkg/compute/tasks/guest_insert_iso_task.go index 6e9b4713700..36a65c5c4b1 100644 --- a/pkg/compute/tasks/guest_insert_iso_task.go +++ b/pkg/compute/tasks/guest_insert_iso_task.go @@ -94,10 +94,15 @@ func (self *GuestInsertIsoTask) OnIsoPrepareComplete(ctx context.Context, obj db guest := obj.(*models.SGuest) if cdrom, ok := guest.InsertIsoSucc(cdromOrdinal, imageId, path, size, name, bootIndex); ok { db.OpsLog.LogEvent(guest, db.ACT_ISO_ATTACH, cdrom.GetDetails(), self.UserCred) - if guest.GetDriver().NeedRequestGuestHotAddIso(ctx, guest) { + drv, err := guest.GetDriver() + 
if err != nil { + self.OnIsoPrepareCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) + return + } + if drv.NeedRequestGuestHotAddIso(ctx, guest) { self.SetStage("OnConfigSyncComplete", nil) boot := jsonutils.QueryBoolean(self.Params, "boot", false) - guest.GetDriver().RequestGuestHotAddIso(ctx, guest, path, boot, self) + drv.RequestGuestHotAddIso(ctx, guest, path, boot, self) } else { self.SetStageComplete(ctx, nil) } diff --git a/pkg/compute/tasks/guest_insert_vfd_task.go b/pkg/compute/tasks/guest_insert_vfd_task.go index ddecf75f5a1..c3555f58721 100644 --- a/pkg/compute/tasks/guest_insert_vfd_task.go +++ b/pkg/compute/tasks/guest_insert_vfd_task.go @@ -86,10 +86,15 @@ func (self *GuestInsertVfdTask) OnVfdPrepareComplete(ctx context.Context, obj db guest := obj.(*models.SGuest) if guest.InsertVfdSucc(floppyOrdinal, imageId, path, size, name) { db.OpsLog.LogEvent(guest, db.ACT_VFD_ATTACH, guest.GetDetailsVfd(floppyOrdinal, self.UserCred), self.UserCred) - if guest.GetDriver().NeedRequestGuestHotAddVfd(ctx, guest) { + drv, err := guest.GetDriver() + if err != nil { + self.OnVfdPrepareCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) + return + } + if drv.NeedRequestGuestHotAddVfd(ctx, guest) { self.SetStage("OnConfigSyncComplete", nil) boot := jsonutils.QueryBoolean(self.Params, "boot", false) - guest.GetDriver().RequestGuestHotAddVfd(ctx, guest, path, boot, self) + drv.RequestGuestHotAddVfd(ctx, guest, path, boot, self) } else { self.SetStageComplete(ctx, nil) } diff --git a/pkg/compute/tasks/guest_live_migrate_task.go b/pkg/compute/tasks/guest_live_migrate_task.go index 312aaacfb00..288d29027b8 100644 --- a/pkg/compute/tasks/guest_live_migrate_task.go +++ b/pkg/compute/tasks/guest_live_migrate_task.go @@ -753,7 +753,12 @@ func (task *ManagedGuestMigrateTask) MigrateStart(ctx context.Context, guest *mo guest.SetStatus(ctx, task.UserCred, api.VM_MIGRATING, "") input := api.GuestMigrateInput{} task.GetParams().Unmarshal(&input) - if err := 
guest.GetDriver().RequestMigrate(ctx, guest, task.UserCred, input, task); err != nil { + drv, err := guest.GetDriver() + if err != nil { + task.OnMigrateCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) + return + } + if err := drv.RequestMigrate(ctx, guest, task.UserCred, input, task); err != nil { task.OnMigrateCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) } } @@ -807,7 +812,12 @@ func (task *ManagedGuestLiveMigrateTask) MigrateStart(ctx context.Context, guest guest.SetStatus(ctx, task.UserCred, api.VM_MIGRATING, "") input := api.GuestLiveMigrateInput{} task.GetParams().Unmarshal(&input) - if err := guest.GetDriver().RequestLiveMigrate(ctx, guest, task.UserCred, input, task); err != nil { + drv, err := guest.GetDriver() + if err != nil { + task.OnMigrateCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) + return + } + if err := drv.RequestLiveMigrate(ctx, guest, task.UserCred, input, task); err != nil { task.OnMigrateCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) } } diff --git a/pkg/compute/tasks/guest_publicip_to_eip_task.go b/pkg/compute/tasks/guest_publicip_to_eip_task.go index 837e225e5f2..dcaccb87245 100644 --- a/pkg/compute/tasks/guest_publicip_to_eip_task.go +++ b/pkg/compute/tasks/guest_publicip_to_eip_task.go @@ -38,7 +38,15 @@ func (self *GuestPublicipToEipTask) OnInit(ctx context.Context, obj db.IStandalo guest := obj.(*models.SGuest) self.SetStage("OnEipConvertComplete", nil) - err := guest.GetDriver().RequestConvertPublicipToEip(ctx, self.GetUserCred(), guest, self) + drv, err := guest.GetDriver() + if err != nil { + db.OpsLog.LogEvent(guest, db.ACT_EIP_CONVERT_FAIL, err, self.UserCred) + logclient.AddActionLogWithStartable(self, guest, logclient.ACT_EIP_CONVERT, err, self.UserCred, false) + guest.SetStatus(ctx, self.GetUserCred(), api.VM_EIP_CONVERT_FAILED, err.Error()) + self.SetStageFailed(ctx, jsonutils.NewString(err.Error())) + return + } + err = drv.RequestConvertPublicipToEip(ctx, 
self.GetUserCred(), guest, self) if err != nil { db.OpsLog.LogEvent(guest, db.ACT_EIP_CONVERT_FAIL, err, self.UserCred) logclient.AddActionLogWithStartable(self, guest, logclient.ACT_EIP_CONVERT, err, self.UserCred, false) diff --git a/pkg/compute/tasks/guest_qga_reset_password_task.go b/pkg/compute/tasks/guest_qga_reset_password_task.go index 7ec4d9b6e58..89949c74847 100644 --- a/pkg/compute/tasks/guest_qga_reset_password_task.go +++ b/pkg/compute/tasks/guest_qga_reset_password_task.go @@ -37,7 +37,11 @@ func (self *SGuestQgaBaseTask) guestPing(ctx context.Context, guest *models.SGue if err != nil { return err } - return guest.GetDriver().QgaRequestGuestPing(ctx, self.GetTaskRequestHeader(), host, guest, true, nil) + drv, err := guest.GetDriver() + if err != nil { + return err + } + return drv.QgaRequestGuestPing(ctx, self.GetTaskRequestHeader(), host, guest, true, nil) } func (self *SGuestQgaBaseTask) taskFailed(ctx context.Context, guest *models.SGuest, reason string) { @@ -73,7 +77,12 @@ func (self *GuestQgaSetPasswordTask) OnQgaGuestPing(ctx context.Context, guest * self.taskFailed(ctx, guest, err.Error()) return } - err = guest.GetDriver().QgaRequestSetUserPassword(ctx, self, host, guest, input) + drv, err := guest.GetDriver() + if err != nil { + self.taskFailed(ctx, guest, err.Error()) + return + } + err = drv.QgaRequestSetUserPassword(ctx, self, host, guest, input) if err != nil { self.taskFailed(ctx, guest, err.Error()) } diff --git a/pkg/compute/tasks/guest_qga_restart_network_task.go b/pkg/compute/tasks/guest_qga_restart_network_task.go index 8c3cdf1bcc2..07549abbf61 100644 --- a/pkg/compute/tasks/guest_qga_restart_network_task.go +++ b/pkg/compute/tasks/guest_qga_restart_network_task.go @@ -78,7 +78,11 @@ func (self *GuestQgaRestartNetworkTask) requestSetNetwork(ctx context.Context, g // if success, log network related information logclient.AddActionLogWithStartable(self, guest, logclient.ACT_QGA_NETWORK_INPUT, inputQgaNet, self.UserCred, true) - return 
guest.GetDriver().QgaRequestSetNetwork(ctx, self.UserCred, jsonutils.Marshal(inputQgaNet), host, guest) + drv, err := guest.GetDriver() + if err != nil { + return nil, err + } + return drv.QgaRequestSetNetwork(ctx, self.UserCred, jsonutils.Marshal(inputQgaNet), host, guest) } func (self *GuestQgaRestartNetworkTask) taskFailed(ctx context.Context, guest *models.SGuest, prevIp string, inBlockStream bool, err error) { diff --git a/pkg/compute/tasks/guest_qga_sync_os_info_task.go b/pkg/compute/tasks/guest_qga_sync_os_info_task.go index d07e189cfe3..9e8db6012bf 100644 --- a/pkg/compute/tasks/guest_qga_sync_os_info_task.go +++ b/pkg/compute/tasks/guest_qga_sync_os_info_task.go @@ -40,13 +40,22 @@ func (self *GuestQgaSyncOsInfoTask) guestPing(ctx context.Context, guest *models if err != nil { return err } - return guest.GetDriver().QgaRequestGuestPing(ctx, self.GetTaskRequestHeader(), host, guest, true, nil) + drv, err := guest.GetDriver() + if err != nil { + return err + } + return drv.QgaRequestGuestPing(ctx, self.GetTaskRequestHeader(), host, guest, true, nil) } func (self *GuestQgaSyncOsInfoTask) OnInit(ctx context.Context, obj db.IStandaloneModel, data jsonutils.JSONObject) { guest := obj.(*models.SGuest) host, _ := guest.GetHost() - res, err := guest.GetDriver().QgaRequestGetOsInfo(ctx, self.UserCred, nil, host, guest) + drv, err := guest.GetDriver() + if err != nil { + self.taskFailed(ctx, guest, err.Error()) + return + } + res, err := drv.QgaRequestGetOsInfo(ctx, self.UserCred, nil, host, guest) if err != nil { self.taskFailed(ctx, guest, err.Error()) return diff --git a/pkg/compute/tasks/guest_rebuild_root_task.go b/pkg/compute/tasks/guest_rebuild_root_task.go index f902fa13f3f..78bd1337e64 100644 --- a/pkg/compute/tasks/guest_rebuild_root_task.go +++ b/pkg/compute/tasks/guest_rebuild_root_task.go @@ -103,7 +103,12 @@ func (self *GuestRebuildRootTask) StartRebuildRootDisk(ctx context.Context, gues loginParams["login_key_timestamp"] = "none" 
guest.SetAllMetadata(ctx, loginParams, self.UserCred) - guest.GetDriver().RequestRebuildRootDisk(ctx, guest, self) + drv, err := guest.GetDriver() + if err != nil { + self.OnRebuildRootDiskCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) + return + } + drv.RequestRebuildRootDisk(ctx, guest, self) } func (self *GuestRebuildRootTask) OnRebuildRootDiskComplete(ctx context.Context, guest *models.SGuest, data jsonutils.JSONObject) { @@ -270,7 +275,12 @@ func (self *ManagedGuestRebuildRootTask) OnInit(ctx context.Context, obj db.ISta guest := obj.(*models.SGuest) self.SetStage("OnHostCacheImageComplete", nil) - guest.GetDriver().RequestGuestCreateAllDisks(ctx, guest, self) + drv, err := guest.GetDriver() + if err != nil { + self.OnHostCacheImageCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) + return + } + drv.RequestGuestCreateAllDisks(ctx, guest, self) } func (self *ManagedGuestRebuildRootTask) OnHostCacheImageComplete(ctx context.Context, obj db.IStandaloneModel, data jsonutils.JSONObject) { diff --git a/pkg/compute/tasks/guest_remote_update_task.go b/pkg/compute/tasks/guest_remote_update_task.go index 79d858f8db1..120580dddca 100644 --- a/pkg/compute/tasks/guest_remote_update_task.go +++ b/pkg/compute/tasks/guest_remote_update_task.go @@ -39,7 +39,11 @@ func (self *GuestRemoteUpdateTask) OnInit(ctx context.Context, obj db.IStandalon self.SetStage("OnRemoteUpdateComplete", nil) replaceTags := jsonutils.QueryBoolean(self.Params, "replace_tags", false) taskman.LocalTaskRun(self, func() (jsonutils.JSONObject, error) { - err := guest.GetDriver().RequestRemoteUpdate(ctx, guest, self.UserCred, replaceTags) + drv, err := guest.GetDriver() + if err != nil { + return nil, err + } + err = drv.RequestRemoteUpdate(ctx, guest, self.UserCred, replaceTags) if err != nil { return nil, errors.Wrap(err, "RequestRemoteUpdate") } diff --git a/pkg/compute/tasks/guest_renew_task.go b/pkg/compute/tasks/guest_renew_task.go index 7efab66ba3e..68ad02c73e0 100644 --- 
a/pkg/compute/tasks/guest_renew_task.go +++ b/pkg/compute/tasks/guest_renew_task.go @@ -20,6 +20,7 @@ import ( "yunion.io/x/jsonutils" "yunion.io/x/log" + "yunion.io/x/pkg/errors" "yunion.io/x/pkg/util/billing" api "yunion.io/x/onecloud/pkg/apis/compute" @@ -38,20 +39,29 @@ func init() { taskman.RegisterTask(PrepaidRecycleHostRenewTask{}) } +func (self *GuestRenewTask) taskFailed(ctx context.Context, guest *models.SGuest, err error) { + db.OpsLog.LogEvent(guest, db.ACT_REW_FAIL, err, self.UserCred) + logclient.AddActionLogWithStartable(self, guest, logclient.ACT_RENEW, err, self.UserCred, false) + guest.SetStatus(ctx, self.GetUserCred(), api.VM_RENEW_FAILED, err.Error()) + self.SetStageFailed(ctx, jsonutils.NewString(err.Error())) + return +} + func (self *GuestRenewTask) OnInit(ctx context.Context, obj db.IStandaloneModel, data jsonutils.JSONObject) { guest := obj.(*models.SGuest) durationStr, _ := self.GetParams().GetString("duration") bc, _ := billing.ParseBillingCycle(durationStr) - exp, err := guest.GetDriver().RequestRenewInstance(ctx, guest, bc) + drv, err := guest.GetDriver() if err != nil { - msg := fmt.Sprintf("RequestRenewInstance failed %s", err) - log.Errorf(msg) - db.OpsLog.LogEvent(guest, db.ACT_REW_FAIL, err, self.UserCred) - logclient.AddActionLogWithStartable(self, guest, logclient.ACT_RENEW, err, self.UserCred, false) - guest.SetStatus(ctx, self.GetUserCred(), api.VM_RENEW_FAILED, msg) - self.SetStageFailed(ctx, jsonutils.NewString(err.Error())) + self.taskFailed(ctx, guest, errors.Wrapf(err, "GetDriver")) + return + } + + exp, err := drv.RequestRenewInstance(ctx, guest, bc) + if err != nil { + self.taskFailed(ctx, guest, errors.Wrapf(err, "RequestRenewInstance")) return } diff --git a/pkg/compute/tasks/guest_rescue_task.go b/pkg/compute/tasks/guest_rescue_task.go index 4932d509f00..1843590b9b2 100644 --- a/pkg/compute/tasks/guest_rescue_task.go +++ b/pkg/compute/tasks/guest_rescue_task.go @@ -71,7 +71,12 @@ func (self *StartRescueTask) 
PrepareRescue(ctx context.Context, guest *models.SG guest.SetStatus(ctx, self.UserCred, api.VM_START_RESCUE, "PrepareRescue") host, _ := guest.GetHost() - err := guest.GetDriver().RequestStartRescue(ctx, self, jsonutils.NewDict(), host, guest) + drv, err := guest.GetDriver() + if err != nil { + self.OnRescuePrepareCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) + return + } + err = drv.RequestStartRescue(ctx, self, jsonutils.NewDict(), host, guest) if err != nil { self.OnRescuePrepareCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) return @@ -100,7 +105,12 @@ func (self *StartRescueTask) RescueStartServer(ctx context.Context, guest *model // Set Guest rescue params to guest start params host, _ := guest.GetHost() - err := guest.GetDriver().RequestStartOnHost(ctx, guest, host, self.UserCred, self) + drv, err := guest.GetDriver() + if err != nil { + self.OnRescueStartServerCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) + return + } + err = drv.RequestStartOnHost(ctx, guest, host, self.UserCred, self) if err != nil { self.OnRescueStartServerCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) return @@ -192,7 +202,12 @@ func (self *StopRescueTask) RescueStartServer(ctx context.Context, guest *models // Set Guest rescue params to guest start params host, _ := guest.GetHost() - err := guest.GetDriver().RequestStartOnHost(ctx, guest, host, self.UserCred, self) + drv, err := guest.GetDriver() + if err != nil { + self.OnRescueStartServerCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) + return + } + err = drv.RequestStartOnHost(ctx, guest, host, self.UserCred, self) if err != nil { self.OnRescueStartServerCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) return diff --git a/pkg/compute/tasks/guest_reset_task.go b/pkg/compute/tasks/guest_reset_task.go index b34bc7acab4..f522ceb430f 100644 --- a/pkg/compute/tasks/guest_reset_task.go +++ b/pkg/compute/tasks/guest_reset_task.go @@ -37,12 +37,17 @@ type 
GuestSoftResetTask struct { func (self *GuestSoftResetTask) OnInit(ctx context.Context, obj db.IStandaloneModel, data jsonutils.JSONObject) { guest := obj.(*models.SGuest) - err := guest.GetDriver().RequestSoftReset(ctx, guest, self) - if err == nil { - self.SetStageComplete(ctx, nil) - } else { + drv, err := guest.GetDriver() + if err != nil { self.SetStageFailed(ctx, jsonutils.NewString(err.Error())) + return } + err = drv.RequestSoftReset(ctx, guest, self) + if err != nil { + self.SetStageFailed(ctx, jsonutils.NewString(err.Error())) + return + } + self.SetStageComplete(ctx, nil) } type GuestHardResetTask struct { diff --git a/pkg/compute/tasks/guest_resume_task.go b/pkg/compute/tasks/guest_resume_task.go index af894ee549f..60a0b396ea0 100644 --- a/pkg/compute/tasks/guest_resume_task.go +++ b/pkg/compute/tasks/guest_resume_task.go @@ -37,7 +37,12 @@ func (self *GuestResumeTask) OnInit(ctx context.Context, obj db.IStandaloneModel guest := obj.(*models.SGuest) db.OpsLog.LogEvent(guest, db.ACT_RESUMING, "", self.UserCred) self.SetStage("OnResumeComplete", nil) - err := guest.GetDriver().RequestResumeOnHost(ctx, guest, self) + drv, err := guest.GetDriver() + if err != nil { + self.OnResumeGuestFail(guest, err.Error()) + return + } + err = drv.RequestResumeOnHost(ctx, guest, self) if err != nil { self.OnResumeGuestFail(guest, err.Error()) } diff --git a/pkg/compute/tasks/guest_save_image_task.go b/pkg/compute/tasks/guest_save_image_task.go index 49af38ebb2e..7760863b69f 100644 --- a/pkg/compute/tasks/guest_save_image_task.go +++ b/pkg/compute/tasks/guest_save_image_task.go @@ -49,7 +49,12 @@ func (self *GuestSaveImageTask) OnInit(ctx context.Context, obj db.IStandaloneMo func (self *GuestSaveImageTask) OnStopServerComplete(ctx context.Context, guest *models.SGuest, body jsonutils.JSONObject) { self.SetStage("OnSaveRootImageComplete", nil) - err := guest.GetDriver().RequestSaveImage(ctx, self.GetUserCred(), guest, self) + drv, err := guest.GetDriver() + if err != nil 
{ + self.OnSaveRootImageCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) + return + } + err = drv.RequestSaveImage(ctx, self.GetUserCred(), guest, self) if err != nil { self.OnSaveRootImageCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) return diff --git a/pkg/compute/tasks/guest_set_auto_renew_task.go b/pkg/compute/tasks/guest_set_auto_renew_task.go index ddc59a7dd0d..b3c86dc8977 100644 --- a/pkg/compute/tasks/guest_set_auto_renew_task.go +++ b/pkg/compute/tasks/guest_set_auto_renew_task.go @@ -18,6 +18,7 @@ import ( "context" "yunion.io/x/jsonutils" + "yunion.io/x/pkg/errors" api "yunion.io/x/onecloud/pkg/apis/compute" "yunion.io/x/onecloud/pkg/cloudcommon/db" @@ -34,19 +35,28 @@ func init() { taskman.RegisterTask(GuestSetAutoRenewTask{}) } +func (self *GuestSetAutoRenewTask) taskFailed(ctx context.Context, guest *models.SGuest, err error) { + db.OpsLog.LogEvent(guest, db.ACT_SET_AUTO_RENEW_FAIL, err, self.UserCred) + logclient.AddActionLogWithStartable(self, guest, logclient.ACT_SET_AUTO_RENEW, err, self.UserCred, false) + guest.SetStatus(ctx, self.GetUserCred(), api.VM_SET_AUTO_RENEW_FAILED, err.Error()) + self.SetStageFailed(ctx, jsonutils.NewString(err.Error())) + +} + func (self *GuestSetAutoRenewTask) OnInit(ctx context.Context, obj db.IStandaloneModel, data jsonutils.JSONObject) { guest := obj.(*models.SGuest) self.SetStage("OnSetAutoRenewComplete", nil) input := api.GuestAutoRenewInput{} self.GetParams().Unmarshal(&input) - err := guest.GetDriver().RequestSetAutoRenewInstance(ctx, self.UserCred, guest, input, self) + drv, err := guest.GetDriver() + if err != nil { + self.taskFailed(ctx, guest, errors.Wrapf(err, "GetDriver")) + return + } + err = drv.RequestSetAutoRenewInstance(ctx, self.UserCred, guest, input, self) if err != nil { - // msg := fmt.Sprintf("RequestSetAutoRenewInstance failed %s", err) - db.OpsLog.LogEvent(guest, db.ACT_SET_AUTO_RENEW_FAIL, err, self.UserCred) - logclient.AddActionLogWithStartable(self, guest, 
logclient.ACT_SET_AUTO_RENEW, err, self.UserCred, false) - guest.SetStatus(ctx, self.GetUserCred(), api.VM_SET_AUTO_RENEW_FAILED, err.Error()) - self.SetStageFailed(ctx, jsonutils.NewString(err.Error())) + self.taskFailed(ctx, guest, errors.Wrapf(err, "RequestSetAutoRenewInstance")) return } } diff --git a/pkg/compute/tasks/guest_start_task.go b/pkg/compute/tasks/guest_start_task.go index cb8705c26b4..c1d8364211a 100644 --- a/pkg/compute/tasks/guest_start_task.go +++ b/pkg/compute/tasks/guest_start_task.go @@ -49,7 +49,12 @@ func (self *GuestStartTask) RequestStart(ctx context.Context, guest *models.SGue self.SetStage("OnStartComplete", nil) host, _ := guest.GetHost() guest.SetStatus(ctx, self.UserCred, api.VM_STARTING, "") - err := guest.GetDriver().RequestStartOnHost(ctx, guest, host, self.UserCred, self) + drv, err := guest.GetDriver() + if err != nil { + self.OnStartCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) + return + } + err = drv.RequestStartOnHost(ctx, guest, host, self.UserCred, self) if err != nil { self.OnStartCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) return diff --git a/pkg/compute/tasks/guest_stop_task.go b/pkg/compute/tasks/guest_stop_task.go index 3b2ad7992c2..12fd783a2b3 100644 --- a/pkg/compute/tasks/guest_stop_task.go +++ b/pkg/compute/tasks/guest_stop_task.go @@ -53,7 +53,12 @@ func (self *GuestStopTask) stopGuest(ctx context.Context, guest *models.SGuest) guest.SetStatus(ctx, self.GetUserCred(), api.VM_STOPPING, "") } self.SetStage("OnGuestStopTaskComplete", nil) - err = guest.GetDriver().RequestStopOnHost(ctx, guest, host, self, !self.IsSubtask()) + drv, err := guest.GetDriver() + if err != nil { + self.OnGuestStopTaskCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) + return + } + err = drv.RequestStopOnHost(ctx, guest, host, self, !self.IsSubtask()) if err != nil { self.OnGuestStopTaskCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) } diff --git 
a/pkg/compute/tasks/guest_suspend_task.go b/pkg/compute/tasks/guest_suspend_task.go index 7105ccecf9c..a6c29ef7dca 100644 --- a/pkg/compute/tasks/guest_suspend_task.go +++ b/pkg/compute/tasks/guest_suspend_task.go @@ -39,7 +39,12 @@ func (self *GuestSuspendTask) OnInit(ctx context.Context, obj db.IStandaloneMode db.OpsLog.LogEvent(guest, db.ACT_STOPPING, "", self.UserCred) guest.SetStatus(ctx, self.UserCred, api.VM_SUSPENDING, "GuestSusPendTask") self.SetStage("OnSuspendComplete", nil) - err := guest.GetDriver().RequestSuspendOnHost(ctx, guest, self) + drv, err := guest.GetDriver() + if err != nil { + self.OnSuspendGuestFail(guest, err.Error()) + return + } + err = drv.RequestSuspendOnHost(ctx, guest, self) if err != nil { self.OnSuspendGuestFail(guest, err.Error()) } diff --git a/pkg/compute/tasks/guest_sync_isolated_device_task.go b/pkg/compute/tasks/guest_sync_isolated_device_task.go index eeaf9291665..9635cfdb916 100644 --- a/pkg/compute/tasks/guest_sync_isolated_device_task.go +++ b/pkg/compute/tasks/guest_sync_isolated_device_task.go @@ -47,7 +47,13 @@ func (self *GuestIsolatedDeviceSyncTask) onTaskFail(ctx context.Context, guest * func (self *GuestIsolatedDeviceSyncTask) OnInit(ctx context.Context, obj db.IStandaloneModel, data jsonutils.JSONObject) { guest := obj.(*models.SGuest) self.SetStage("OnSyncConfigComplete", nil) - if err := guest.GetDriver().RequestSyncIsolatedDevice(ctx, guest, self); err != nil { + drv, err := guest.GetDriver() + if err != nil { + self.onTaskFail(ctx, guest, jsonErrorObj(err)) + return + } + err = drv.RequestSyncIsolatedDevice(ctx, guest, self) + if err != nil { self.onTaskFail(ctx, guest, jsonErrorObj(err)) return } diff --git a/pkg/compute/tasks/guest_sync_nic_traffics_task.go b/pkg/compute/tasks/guest_sync_nic_traffics_task.go index f198e36841e..8b76ba3070b 100644 --- a/pkg/compute/tasks/guest_sync_nic_traffics_task.go +++ b/pkg/compute/tasks/guest_sync_nic_traffics_task.go @@ -53,7 +53,12 @@ func (self 
*GuestResetNicTrafficsTask) OnInit(ctx context.Context, obj db.IStand input := compute.ServerNicTrafficLimit{} self.GetParams().Unmarshal(&input) self.SetStage("OnResetNicTrafficLimit", nil) - err = guest.GetDriver().RequestResetNicTrafficLimit(ctx, self, host, guest, []compute.ServerNicTrafficLimit{input}) + drv, err := guest.GetDriver() + if err != nil { + self.taskFailed(ctx, guest, err.Error()) + return + } + err = drv.RequestResetNicTrafficLimit(ctx, self, host, guest, []compute.ServerNicTrafficLimit{input}) if err != nil { self.taskFailed(ctx, guest, err.Error()) } @@ -102,7 +107,12 @@ func (self *GuestSetNicTrafficsTask) OnInit(ctx context.Context, obj db.IStandal input := compute.ServerNicTrafficLimit{} self.GetParams().Unmarshal(&input) self.SetStage("OnSetNicTrafficLimit", nil) - err = guest.GetDriver().RequestSetNicTrafficLimit(ctx, self, host, guest, []compute.ServerNicTrafficLimit{input}) + drv, err := guest.GetDriver() + if err != nil { + self.taskFailed(ctx, guest, err.Error()) + return + } + err = drv.RequestSetNicTrafficLimit(ctx, self, host, guest, []compute.ServerNicTrafficLimit{input}) if err != nil { self.taskFailed(ctx, guest, err.Error()) } diff --git a/pkg/compute/tasks/guest_sync_task.go b/pkg/compute/tasks/guest_sync_task.go index c723fe9d43e..d5a9ba089ab 100644 --- a/pkg/compute/tasks/guest_sync_task.go +++ b/pkg/compute/tasks/guest_sync_task.go @@ -43,15 +43,20 @@ func init() { func (self *GuestSyncConfTask) OnInit(ctx context.Context, obj db.IStandaloneModel, data jsonutils.JSONObject) { guest := obj.(*models.SGuest) db.OpsLog.LogEvent(guest, db.ACT_SYNC_CONF, nil, self.UserCred) - if host, _ := guest.GetHost(); host == nil { - self.SetStageFailed(ctx, jsonutils.NewString("No host for sync")) + host, err := guest.GetHost() + if err != nil { + self.SetStageFailed(ctx, jsonutils.NewString(err.Error())) + return + } + self.SetStage("OnSyncComplete", nil) + drv, err := guest.GetDriver() + if err != nil { + self.SetStageFailed(ctx, 
jsonutils.NewString(err.Error())) return - } else { - self.SetStage("OnSyncComplete", nil) - if err := guest.GetDriver().RequestSyncConfigOnHost(ctx, guest, host, self); err != nil { - self.SetStageFailed(ctx, jsonutils.NewString(err.Error())) - log.Errorf("SyncConfTask faled %v", err) - } + } + err = drv.RequestSyncConfigOnHost(ctx, guest, host, self) + if err != nil { + self.SetStageFailed(ctx, jsonutils.NewString(err.Error())) } } @@ -95,8 +100,15 @@ func (self *GuestSyncConfTask) StartRestartNetworkTask(ctx context.Context, gues // try use qga restart network err = func() error { - host, _ := guest.GetHost() - err = guest.GetDriver().QgaRequestGuestPing(ctx, self.GetTaskRequestHeader(), host, guest, false, &api.ServerQgaTimeoutInput{1000}) + host, err := guest.GetHost() + if err != nil { + return err + } + drv, err := guest.GetDriver() + if err != nil { + return err + } + err = drv.QgaRequestGuestPing(ctx, self.GetTaskRequestHeader(), host, guest, false, &api.ServerQgaTimeoutInput{1000}) if err != nil { return errors.Wrap(err, "qga guest-ping") } diff --git a/pkg/compute/tasks/guest_syncstatus_task.go b/pkg/compute/tasks/guest_syncstatus_task.go index b23ed2980ff..790c2134baa 100644 --- a/pkg/compute/tasks/guest_syncstatus_task.go +++ b/pkg/compute/tasks/guest_syncstatus_task.go @@ -16,6 +16,7 @@ package tasks import ( "context" + "fmt" "yunion.io/x/cloudmux/pkg/cloudprovider" "yunion.io/x/jsonutils" @@ -38,17 +39,25 @@ func init() { func (self *GuestSyncstatusTask) OnInit(ctx context.Context, obj db.IStandaloneModel, data jsonutils.JSONObject) { guest := obj.(*models.SGuest) - host, _ := guest.GetHost() - if host == nil || host.HostStatus == api.HOST_OFFLINE { - log.Errorf("host is not reachable") - guest.SetStatus(ctx, self.UserCred, api.VM_UNKNOWN, "Host not responding") + host, err := guest.GetHost() + if err != nil { + guest.SetStatus(ctx, self.UserCred, api.VM_UNKNOWN, fmt.Sprintf("get host error: %v", err)) + self.SetStageComplete(ctx, nil) + return + } 
+ if !host.IsBaremetal && host.HostStatus == api.HOST_OFFLINE { + guest.SetStatus(ctx, self.UserCred, api.VM_UNKNOWN, "host offline") self.SetStageComplete(ctx, nil) return } self.SetStage("OnGetStatusComplete", nil) - err := guest.GetDriver().RequestSyncstatusOnHost(ctx, guest, host, self.UserCred, self) + drv, err := guest.GetDriver() + if err != nil { + self.OnGetStatusCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) + return + } + err = drv.RequestSyncstatusOnHost(ctx, guest, host, self.UserCred, self) if err != nil { - log.Errorf("request_syncstatus_on_host: %s", err) self.OnGetStatusCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) return } diff --git a/pkg/compute/tasks/guest_undeploy_task.go b/pkg/compute/tasks/guest_undeploy_task.go index af3cffcda64..8b75e55583c 100644 --- a/pkg/compute/tasks/guest_undeploy_task.go +++ b/pkg/compute/tasks/guest_undeploy_task.go @@ -48,7 +48,12 @@ func (self *GuestUndeployTask) OnInit(ctx context.Context, obj db.IStandaloneMod host = models.HostManager.FetchHostById(targetHostId) } if host != nil { - err := guest.GetDriver().RequestUndeployGuestOnHost(ctx, guest, host, self) + drv, err := guest.GetDriver() + if err != nil { + self.OnStartDeleteGuestFail(ctx, err) + return + } + err = drv.RequestUndeployGuestOnHost(ctx, guest, host, self) if err != nil { self.OnStartDeleteGuestFail(ctx, err) } @@ -61,7 +66,12 @@ func (self *GuestUndeployTask) OnMasterHostUndeployGuestComplete(ctx context.Con self.SetStage("OnGuestUndeployComplete", nil) host := models.HostManager.FetchHostById(guest.BackupHostId) if host != nil { - err := guest.GetDriver().RequestUndeployGuestOnHost(ctx, guest, host, self) + drv, err := guest.GetDriver() + if err != nil { + self.OnStartDeleteGuestFail(ctx, err) + return + } + err = drv.RequestUndeployGuestOnHost(ctx, guest, host, self) if err != nil { self.OnStartDeleteGuestFail(ctx, err) } diff --git a/pkg/compute/tasks/ha_guest_deploy_task.go 
b/pkg/compute/tasks/ha_guest_deploy_task.go index e515a674ab5..6644fadf880 100644 --- a/pkg/compute/tasks/ha_guest_deploy_task.go +++ b/pkg/compute/tasks/ha_guest_deploy_task.go @@ -19,7 +19,6 @@ import ( "fmt" "yunion.io/x/jsonutils" - "yunion.io/x/log" api "yunion.io/x/onecloud/pkg/apis/compute" "yunion.io/x/onecloud/pkg/cloudcommon/db" @@ -50,13 +49,17 @@ func (self *HAGuestDeployTask) OnDeployWaitServerStop( func (self *HAGuestDeployTask) DeployBackup(ctx context.Context, guest *models.SGuest, data jsonutils.JSONObject) { self.SetStage("OnDeploySlaveGuestComplete", nil) host := models.HostManager.FetchHostById(guest.BackupHostId) - err := guest.GetDriver().RequestDeployGuestOnHost(ctx, guest, host, self) + drv, err := guest.GetDriver() if err != nil { - log.Errorf("request_deploy_guest_on_host %s", err) self.OnDeployGuestFail(ctx, guest, err) - } else { - guest.SetStatus(ctx, self.UserCred, api.VM_DEPLOYING_BACKUP, "") + return + } + err = drv.RequestDeployGuestOnHost(ctx, guest, host, self) + if err != nil { + self.OnDeployGuestFail(ctx, guest, err) + return } + guest.SetStatus(ctx, self.UserCred, api.VM_DEPLOYING_BACKUP, "") } func (self *HAGuestDeployTask) OnDeploySlaveGuestComplete( @@ -87,9 +90,13 @@ func (self *GuestDeployBackupTask) OnInit(ctx context.Context, obj db.IStandalon } self.SetStage("OnDeployGuestComplete", nil) host := models.HostManager.FetchHostById(guest.BackupHostId) - err := guest.GetDriver().RequestDeployGuestOnHost(ctx, guest, host, self) + drv, err := guest.GetDriver() + if err != nil { + self.OnDeployGuestCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) + return + } + err = drv.RequestDeployGuestOnHost(ctx, guest, host, self) if err != nil { - log.Errorf("request_deploy_guest_on_host %s", err) self.OnDeployGuestCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) } } diff --git a/pkg/compute/tasks/ha_guest_start_task.go b/pkg/compute/tasks/ha_guest_start_task.go index 79e78c31219..bf06eddde2a 100644 --- 
a/pkg/compute/tasks/ha_guest_start_task.go +++ b/pkg/compute/tasks/ha_guest_start_task.go @@ -52,7 +52,13 @@ func (self *HAGuestStartTask) RequestStopBackupGuest(ctx context.Context, guest host := models.HostManager.FetchHostById(guest.BackupHostId) self.SetStage("OnBackupGuestStopComplete", nil) guest.SetStatus(ctx, self.UserCred, api.VM_BACKUP_STOPING, "HAGuestStartTask") - err := guest.GetDriver().RequestStopOnHost(ctx, guest, host, self, false) + drv, err := guest.GetDriver() + if err != nil { + guest.SetStatus(ctx, self.UserCred, api.VM_BACKUP_START_FAILED, err.Error()) + self.SetStageFailed(ctx, nil) + return + } + err = drv.RequestStopOnHost(ctx, guest, host, self, false) if err != nil { guest.SetStatus(ctx, self.UserCred, api.VM_BACKUP_START_FAILED, err.Error()) self.SetStageFailed(ctx, nil) @@ -87,7 +93,13 @@ func (self *HAGuestStartTask) RequestStartBacking(ctx context.Context, guest *mo self.Params.Set("block_ready", jsonutils.JSONTrue) } - err := guest.GetDriver().RequestStartOnHost(ctx, guest, host, self.UserCred, self) + drv, err := guest.GetDriver() + if err != nil { + self.OnStartCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) + return + } + + err = drv.RequestStartOnHost(ctx, guest, host, self.UserCred, self) if err != nil { self.OnStartCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) return @@ -126,7 +138,13 @@ func (self *HAGuestStartTask) OnStartBackupGuestCompleteFailed( func (self *HAGuestStartTask) OnStartComplete(ctx context.Context, guest *models.SGuest, data jsonutils.JSONObject) { if !guest.IsGuestBackupMirrorJobReady(ctx, self.UserCred) { - if err := guest.GetDriver().RequestSlaveBlockStreamDisks(ctx, guest, self); err != nil { + drv, err := guest.GetDriver() + if err != nil { + guest.SetGuestBackupMirrorJobFailed(ctx, self.UserCred) + guest.SetBackupGuestStatus(self.UserCred, api.VM_BLOCK_STREAM_FAIL, err.Error()) + return + } + if err := drv.RequestSlaveBlockStreamDisks(ctx, guest, self); err != nil { 
guest.SetGuestBackupMirrorJobFailed(ctx, self.UserCred) guest.SetBackupGuestStatus(self.UserCred, api.VM_BLOCK_STREAM_FAIL, err.Error()) } else { diff --git a/pkg/compute/tasks/ha_guest_stop_task.go b/pkg/compute/tasks/ha_guest_stop_task.go index 9960bfb81cc..513cae4de94 100644 --- a/pkg/compute/tasks/ha_guest_stop_task.go +++ b/pkg/compute/tasks/ha_guest_stop_task.go @@ -44,7 +44,12 @@ func (self *HAGuestStopTask) OnGuestStopTaskComplete( } self.SetStage("OnSlaveGuestStopTaskComplete", nil) - err := guest.GetDriver().RequestStopOnHost(ctx, guest, host, self, true) + drv, err := guest.GetDriver() + if err != nil { + self.OnGuestStopTaskCompleteFailed(ctx, guest, jsonutils.NewString(err.Error())) + return + } + err = drv.RequestStopOnHost(ctx, guest, host, self, true) if err != nil { log.Errorf("RequestStopOnHost fail %s", err) self.OnGuestStopTaskCompleteFailed( diff --git a/pkg/compute/tasks/host_storage_attach_task.go b/pkg/compute/tasks/host_storage_attach_task.go index d626ee6ff05..eaf6575fc86 100644 --- a/pkg/compute/tasks/host_storage_attach_task.go +++ b/pkg/compute/tasks/host_storage_attach_task.go @@ -19,6 +19,7 @@ import ( "fmt" "yunion.io/x/jsonutils" + "yunion.io/x/pkg/errors" "yunion.io/x/onecloud/pkg/cloudcommon/db" "yunion.io/x/onecloud/pkg/cloudcommon/db/taskman" @@ -34,7 +35,7 @@ func init() { taskman.RegisterTask(HostStorageAttachTask{}) } -func (self *HostStorageAttachTask) taskFail(ctx context.Context, host *models.SHost, reason jsonutils.JSONObject) { +func (self *HostStorageAttachTask) taskFail(ctx context.Context, host *models.SHost, reason error) { if hoststorage := self.getHoststorage(host); hoststorage != nil { storage := hoststorage.GetStorage() hoststorage.Detach(ctx, self.GetUserCred()) @@ -42,7 +43,7 @@ func (self *HostStorageAttachTask) taskFail(ctx context.Context, host *models.SH db.OpsLog.LogEvent(storage, db.ACT_ATTACH_FAIL, note, self.GetUserCred()) logclient.AddActionLogWithContext(ctx, storage, logclient.ACT_ATTACH_HOST, note, 
self.GetUserCred(), false) } - self.SetStageFailed(ctx, reason) + self.SetStageFailed(ctx, jsonutils.NewString(reason.Error())) } func (self *HostStorageAttachTask) getHoststorage(host *models.SHost) *models.SHoststorage { @@ -60,14 +61,19 @@ func (self *HostStorageAttachTask) OnInit(ctx context.Context, obj db.IStandalon host := obj.(*models.SHost) hoststorage := self.getHoststorage(host) if hoststorage == nil { - self.taskFail(ctx, host, jsonutils.NewString("failed to find hoststorage")) + self.taskFail(ctx, host, errors.Errorf("failed to find hoststorage")) return } storage := hoststorage.GetStorage() self.SetStage("OnAttachStorageComplete", nil) - err := host.GetHostDriver().RequestAttachStorage(ctx, hoststorage, host, storage, self) + driver, err := host.GetHostDriver() if err != nil { - self.taskFail(ctx, host, jsonutils.NewString(err.Error())) + self.taskFail(ctx, host, errors.Wrapf(err, "GetHostDriver")) + return + } + err = driver.RequestAttachStorage(ctx, hoststorage, host, storage, self) + if err != nil { + self.taskFail(ctx, host, errors.Wrapf(err, "RequestAttachStorage")) } } @@ -83,5 +89,5 @@ func (self *HostStorageAttachTask) OnAttachStorageComplete(ctx context.Context, } func (self *HostStorageAttachTask) OnAttachStorageCompleteFailed(ctx context.Context, host *models.SHost, reason jsonutils.JSONObject) { - self.taskFail(ctx, host, reason) + self.taskFail(ctx, host, errors.Errorf(reason.String())) } diff --git a/pkg/compute/tasks/host_storage_detach_task.go b/pkg/compute/tasks/host_storage_detach_task.go index e2361fbee93..2f26f8b0961 100644 --- a/pkg/compute/tasks/host_storage_detach_task.go +++ b/pkg/compute/tasks/host_storage_detach_task.go @@ -19,6 +19,7 @@ import ( "fmt" "yunion.io/x/jsonutils" + "yunion.io/x/pkg/errors" "yunion.io/x/onecloud/pkg/cloudcommon/db" "yunion.io/x/onecloud/pkg/cloudcommon/db/taskman" @@ -34,7 +35,7 @@ func init() { taskman.RegisterTask(HostStorageDetachTask{}) } -func (self *HostStorageDetachTask) taskFail(ctx 
context.Context, host *models.SHost, reason jsonutils.JSONObject) { +func (self *HostStorageDetachTask) taskFail(ctx context.Context, host *models.SHost, reason error) { var hoststorage = new(models.SHoststorage) storageId, _ := self.GetParams().GetString("storage_id") err := models.HoststorageManager.Query().Equals("host_id", host.Id).Equals("storage_id", storageId).First(hoststorage) @@ -44,7 +45,7 @@ func (self *HostStorageDetachTask) taskFail(ctx context.Context, host *models.SH db.OpsLog.LogEvent(storage, db.ACT_DETACH_FAIL, reason, self.GetUserCred()) logclient.AddActionLogWithContext(ctx, storage, logclient.ACT_DETACH_HOST, reason, self.GetUserCred(), false) } - self.SetStageFailed(ctx, reason) + self.SetStageFailed(ctx, jsonutils.NewString(reason.Error())) } func (self *HostStorageDetachTask) OnInit(ctx context.Context, obj db.IStandaloneModel, data jsonutils.JSONObject) { @@ -52,14 +53,19 @@ func (self *HostStorageDetachTask) OnInit(ctx context.Context, obj db.IStandalon storageId, _ := self.GetParams().GetString("storage_id") _storage, err := models.StorageManager.FetchById(storageId) if err != nil { - self.taskFail(ctx, host, jsonutils.NewString(err.Error())) + self.taskFail(ctx, host, errors.Wrapf(err, "FetchById %s", storageId)) return } storage := _storage.(*models.SStorage) self.SetStage("OnDetachStorageComplete", nil) - err = host.GetHostDriver().RequestDetachStorage(ctx, host, storage, self) + driver, err := host.GetHostDriver() if err != nil { - self.taskFail(ctx, host, jsonutils.NewString(err.Error())) + self.taskFail(ctx, host, errors.Wrapf(err, "GetHostDriver")) + return + } + err = driver.RequestDetachStorage(ctx, host, storage, self) + if err != nil { + self.taskFail(ctx, host, errors.Wrapf(err, "RequestDetachStorage")) } } @@ -75,5 +81,5 @@ func (self *HostStorageDetachTask) OnDetachStorageComplete(ctx context.Context, } func (self *HostStorageDetachTask) OnDetachStorageCompleteFailed(ctx context.Context, host *models.SHost, reason 
jsonutils.JSONObject) { - self.taskFail(ctx, host, reason) + self.taskFail(ctx, host, errors.Errorf(reason.String())) } diff --git a/pkg/compute/tasks/host_sync_task.go b/pkg/compute/tasks/host_sync_task.go index ab98631170c..21d75af5498 100644 --- a/pkg/compute/tasks/host_sync_task.go +++ b/pkg/compute/tasks/host_sync_task.go @@ -35,7 +35,12 @@ func init() { func (self *HostSyncTask) OnInit(ctx context.Context, obj db.IStandaloneModel, data jsonutils.JSONObject) { host := obj.(*models.SHost) - err := host.GetHostDriver().RequestSyncOnHost(ctx, host, self) + driver, err := host.GetHostDriver() + if err != nil { + self.SetStageFailed(ctx, jsonutils.NewString(err.Error())) + return + } + err = driver.RequestSyncOnHost(ctx, host, self) if err != nil { self.SetStageFailed(ctx, jsonutils.NewString(err.Error())) log.Errorf("syncHost:%s err:%v", host.GetId(), err) diff --git a/pkg/compute/tasks/pod_delete_task.go b/pkg/compute/tasks/pod_delete_task.go index 6d7a71b00f0..eccf7a6cb31 100644 --- a/pkg/compute/tasks/pod_delete_task.go +++ b/pkg/compute/tasks/pod_delete_task.go @@ -68,7 +68,12 @@ func (t *PodDeleteTask) OnContainerDeleted(ctx context.Context, pod *models.SGue return } // call stop task to umount volumes - if err := pod.GetDriver().StartGuestStopTask(pod, ctx, t.GetUserCred(), nil, t.GetTaskId()); err != nil { + drv, err := pod.GetDriver() + if err != nil { + t.OnPodStoppedFailed(ctx, pod, jsonutils.NewString(err.Error())) + return + } + if err := drv.StartGuestStopTask(pod, ctx, t.GetUserCred(), nil, t.GetTaskId()); err != nil { if errors.Cause(err) == httperrors.ErrNotFound { t.OnPodStopped(ctx, pod, nil) return diff --git a/pkg/compute/tasks/snapshot_delete_task.go b/pkg/compute/tasks/snapshot_delete_task.go index 5cccbce8e11..1574171cbf0 100644 --- a/pkg/compute/tasks/snapshot_delete_task.go +++ b/pkg/compute/tasks/snapshot_delete_task.go @@ -116,7 +116,12 @@ func (self *SnapshotDeleteTask) OnReloadDiskSnapshot(ctx context.Context, snapsh 
params.Set("disk_id", jsonutils.NewString(snapshot.DiskId)) params.Set("auto_deleted", jsonutils.JSONTrue) self.SetStage("OnDeleteSnapshot", nil) - err = guest.GetDriver().RequestDeleteSnapshot(ctx, guest, self, params) + drv, err := guest.GetDriver() + if err != nil { + self.TaskFailed(ctx, snapshot, jsonutils.NewString(err.Error())) + return + } + err = drv.RequestDeleteSnapshot(ctx, guest, self, params) if err != nil { self.TaskFailed(ctx, snapshot, jsonutils.NewString(err.Error())) } @@ -171,8 +176,13 @@ func (self *BatchSnapshotsDeleteTask) StartStorageDeleteSnapshot(ctx context.Con self.SetStageFailed(ctx, jsonutils.NewString(errors.Wrapf(err, "snapshot.GetHost").Error())) return } + driver, err := host.GetHostDriver() + if err != nil { + self.SetStageFailed(ctx, jsonutils.NewString(errors.Wrapf(err, "GetHostDriver").Error())) + return + } self.SetStage("OnStorageDeleteSnapshot", nil) - err = host.GetHostDriver().RequestDeleteSnapshotsWithStorage(ctx, host, snapshot, self) + err = driver.RequestDeleteSnapshotsWithStorage(ctx, host, snapshot, self) if err != nil { self.SetStageFailed(ctx, jsonutils.NewString(err.Error())) } diff --git a/pkg/compute/tasks/storage_cache_image_task.go b/pkg/compute/tasks/storage_cache_image_task.go index 985262fbaa9..ad3b2ebc5a5 100644 --- a/pkg/compute/tasks/storage_cache_image_task.go +++ b/pkg/compute/tasks/storage_cache_image_task.go @@ -18,6 +18,7 @@ import ( "context" "yunion.io/x/jsonutils" + "yunion.io/x/pkg/errors" api "yunion.io/x/onecloud/pkg/apis/compute" "yunion.io/x/onecloud/pkg/cloudcommon/db" @@ -80,7 +81,13 @@ func (self *StorageCacheImageTask) OnRelinquishLeastUsedCachedImageComplete(ctx } } - err = host.GetHostDriver().CheckAndSetCacheImage(ctx, self.UserCred, host, storageCache, self) + driver, err := host.GetHostDriver() + if err != nil { + self.OnImageCacheCompleteFailed(ctx, storageCache, jsonutils.NewString(errors.Wrapf(err, "GetHostDriver").Error())) + return + } + + err = 
driver.CheckAndSetCacheImage(ctx, self.UserCred, host, storageCache, self) if err != nil { errData := taskman.Error2TaskData(err) self.OnImageCacheCompleteFailed(ctx, storageCache, errData) diff --git a/pkg/compute/tasks/storage_uncache_image_task.go b/pkg/compute/tasks/storage_uncache_image_task.go index 60329eab63e..9007851a5f3 100644 --- a/pkg/compute/tasks/storage_uncache_image_task.go +++ b/pkg/compute/tasks/storage_uncache_image_task.go @@ -16,10 +16,10 @@ package tasks import ( "context" - "fmt" "yunion.io/x/jsonutils" "yunion.io/x/log" + "yunion.io/x/pkg/errors" "yunion.io/x/onecloud/pkg/cloudcommon/db" "yunion.io/x/onecloud/pkg/cloudcommon/db/taskman" @@ -58,27 +58,27 @@ func (self *StorageUncacheImageTask) OnInit(ctx context.Context, obj db.IStandal host, err := storageCache.GetMasterHost() if err != nil { - self.OnTaskFailed(ctx, storageCache, jsonutils.NewString(fmt.Sprintf("fail to get host %s", err))) + self.OnTaskFailed(ctx, storageCache, errors.Wrapf(err, "GetMasterHost")) return } - if host == nil { - self.OnImageUncacheComplete(ctx, obj, data) + driver, err := host.GetHostDriver() + if err != nil { + self.OnTaskFailed(ctx, storageCache, errors.Wrapf(err, "GetHostDriver")) return } self.SetStage("OnImageUncacheComplete", nil) - err = host.GetHostDriver().RequestUncacheImage(ctx, host, storageCache, self) - + err = driver.RequestUncacheImage(ctx, host, storageCache, self) if err != nil { - self.OnTaskFailed(ctx, storageCache, jsonutils.NewString(err.Error())) + self.OnTaskFailed(ctx, storageCache, errors.Wrapf(err, "RequestUncacheImage")) } } -func (self *StorageUncacheImageTask) OnTaskFailed(ctx context.Context, storageCache *models.SStoragecache, reason jsonutils.JSONObject) { +func (self *StorageUncacheImageTask) OnTaskFailed(ctx context.Context, storageCache *models.SStoragecache, reason error) { body := jsonutils.NewDict() - body.Add(reason, "reason") + body.Add(jsonutils.NewString(reason.Error()), "reason") imageId, _ := 
self.Params.GetString("image_id") body.Add(jsonutils.NewString(imageId), "image_id") @@ -92,7 +92,7 @@ func (self *StorageUncacheImageTask) OnTaskFailed(ctx context.Context, storageCa func (self *StorageUncacheImageTask) OnImageUncacheCompleteFailed(ctx context.Context, obj db.IStandaloneModel, data jsonutils.JSONObject) { storageCache := obj.(*models.SStoragecache) - self.OnTaskFailed(ctx, storageCache, data) + self.OnTaskFailed(ctx, storageCache, errors.Errorf(data.String())) } func (self *StorageUncacheImageTask) OnImageUncacheComplete(ctx context.Context, obj db.IStandaloneModel, data jsonutils.JSONObject) { diff --git a/pkg/compute/tasks/storage_update_task.go b/pkg/compute/tasks/storage_update_task.go index e860648a9eb..be9df71e4a4 100644 --- a/pkg/compute/tasks/storage_update_task.go +++ b/pkg/compute/tasks/storage_update_task.go @@ -40,9 +40,9 @@ type StorageUpdateTask struct { func (self *StorageUpdateTask) OnInit(ctx context.Context, obj db.IStandaloneModel, data jsonutils.JSONObject) { self.SetStage("OnStorageUpdate", nil) storage := obj.(*models.SStorage) - dirver := models.GetStorageDriver(storage.StorageType) - if dirver != nil { - err := dirver.DoStorageUpdateTask(ctx, self.UserCred, storage, self) + driver := models.GetStorageDriver(storage.StorageType) + if driver != nil { + err := driver.DoStorageUpdateTask(ctx, self.UserCred, storage, self) if err != nil { self.SetStageFailed(ctx, jsonutils.NewString(err.Error())) } diff --git a/pkg/mcclient/cloudpods/cloudpods.go b/pkg/mcclient/cloudpods/cloudpods.go index 77073281c9c..89e571d9946 100644 --- a/pkg/mcclient/cloudpods/cloudpods.go +++ b/pkg/mcclient/cloudpods/cloudpods.go @@ -50,6 +50,7 @@ type ModelManager interface { List(session *mcclient.ClientSession, params jsonutils.JSONObject) (*printutils.ListResult, error) Create(session *mcclient.ClientSession, params jsonutils.JSONObject) (jsonutils.JSONObject, error) Delete(session *mcclient.ClientSession, id string, param jsonutils.JSONObject) 
(jsonutils.JSONObject, error) + DeleteWithParam(session *mcclient.ClientSession, id string, query jsonutils.JSONObject, body jsonutils.JSONObject) (jsonutils.JSONObject, error) PerformAction(session *mcclient.ClientSession, id string, action string, params jsonutils.JSONObject) (jsonutils.JSONObject, error) Get(session *mcclient.ClientSession, id string, params jsonutils.JSONObject) (jsonutils.JSONObject, error) Update(session *mcclient.ClientSession, id string, params jsonutils.JSONObject) (jsonutils.JSONObject, error) @@ -123,7 +124,11 @@ func (self *SCloudpodsClient) auth() error { serviceRegion = region } } - self.s = client.NewSession(context.Background(), serviceRegion, "", "publicURL", token) + endpoint := "publicURL" + if strings.Contains(self.authURL, "api/s/identity/v3") { + endpoint = "apigateway" + } + self.s = client.NewSession(context.Background(), serviceRegion, "", endpoint, token) if !self.s.GetToken().HasSystemAdminPrivilege() { return fmt.Errorf("no system admin privilege") } @@ -176,7 +181,7 @@ func (self *SCloudpodsClient) delete(manager ModelManager, id string) error { return nil } params := map[string]interface{}{"override_pending_delete": true} - _, err := manager.Delete(self.s, id, jsonutils.Marshal(params)) + _, err := manager.DeleteWithParam(self.s, id, jsonutils.Marshal(params), nil) return err } diff --git a/pkg/mcclient/cloudpods/host.go b/pkg/mcclient/cloudpods/host.go index a798ea195e1..2f657789a8c 100644 --- a/pkg/mcclient/cloudpods/host.go +++ b/pkg/mcclient/cloudpods/host.go @@ -59,11 +59,11 @@ func (host *SHost) GetOvnVersion() string { } func (host *SHost) Refresh() error { - host, err := host.zone.region.GetHost(host.Id) + h, err := host.zone.region.GetHost(host.Id) if err != nil { return err } - return jsonutils.Update(host, host) + return jsonutils.Update(host, h) } func (host *SHost) getIWires() ([]cloudprovider.ICloudWire, error) { @@ -157,12 +157,16 @@ func (host *SHost) GetStorageSizeMB() int64 { return host.StorageSize } 
+func (host *SHost) GetStorageInfo() jsonutils.JSONObject { + return host.StorageInfo +} + func (host *SHost) GetStorageType() string { return host.StorageType } func (host *SHost) GetHostType() string { - return api.HOST_TYPE_CLOUDPODS + return host.HostType } func (host *SHost) GetIsMaintenance() bool { @@ -262,7 +266,10 @@ func (host *SHost) GetIHostNics() ([]cloudprovider.ICloudHostNetInterface, error } func (host *SHost) CreateVM(opts *cloudprovider.SManagedVMCreateConfig) (cloudprovider.ICloudVM, error) { - hypervisor := api.HOSTTYPE_HYPERVISOR[host.HostType] + hypervisor := api.HOST_TYPE_HYPERVISOR + if host.HostType == api.HOST_TYPE_ESXI { + hypervisor = api.HYPERVISOR_ESXI + } ins, err := host.zone.region.CreateInstance(host.Id, hypervisor, opts) if err != nil { return nil, err @@ -271,6 +278,26 @@ func (host *SHost) CreateVM(opts *cloudprovider.SManagedVMCreateConfig) (cloudpr return ins, nil } +func (host *SHost) Start() error { + _, err := host.zone.region.perform(&modules.Hosts, host.Id, "start", nil) + return err +} + +func (host *SHost) Stop() error { + _, err := host.zone.region.perform(&modules.Hosts, host.Id, "stop", nil) + return err +} + +func (host *SHost) CreateBaremetalServer(opts *api.ServerCreateInput) (cloudprovider.ICloudVM, error) { + vm := &SInstance{host: host} + opts.PreferHost = host.Id + err := host.zone.region.create(&modules.Servers, opts, vm) + if err != nil { + return nil, err + } + return vm, nil +} + func (region *SRegion) GetHost(id string) (*SHost, error) { host := &SHost{} err := region.cli.get(&modules.Hosts, id, nil, host) @@ -303,9 +330,7 @@ func (zone *SZone) GetIHosts() ([]cloudprovider.ICloudHost, error) { } func (region *SRegion) GetHosts(zoneId string) ([]SHost, error) { - params := map[string]interface{}{ - "baremetal": false, - } + params := map[string]interface{}{} if len(zoneId) > 0 { params["zone_id"] = zoneId } diff --git a/pkg/mcclient/cloudpods/instance.go b/pkg/mcclient/cloudpods/instance.go index 
0895cdf9639..e082a022359 100644 --- a/pkg/mcclient/cloudpods/instance.go +++ b/pkg/mcclient/cloudpods/instance.go @@ -212,7 +212,7 @@ func (self *SInstance) SetSecurityGroups(ids []string) error { } func (self *SInstance) GetHypervisor() string { - return api.HYPERVISOR_CLOUDPODS + return self.Hypervisor } func (self *SInstance) StartVM(ctx context.Context) error { @@ -321,7 +321,7 @@ func (self *SRegion) GetInstanceVnc(id, name string) (*cloudprovider.ServerVncOu Protocol: "cloudpods", InstanceId: id, InstanceName: name, - Hypervisor: api.HYPERVISOR_CLOUDPODS, + Hypervisor: api.HYPERVISOR_DEFAULT, } err = resp.Unmarshal(&result) if err != nil { diff --git a/pkg/notify/models/robot.go b/pkg/notify/models/robot.go index 86c7bee793a..eb24b005d36 100644 --- a/pkg/notify/models/robot.go +++ b/pkg/notify/models/robot.go @@ -156,8 +156,8 @@ func (r *SRobot) ValidateUpdateData(ctx context.Context, userCred mcclient.Token } if len(input.Address) > 0 { // check Address - dirver := GetDriver(fmt.Sprintf("%s-robot", r.Type)) - err := dirver.Send(ctx, api.SendParams{ + driver := GetDriver(fmt.Sprintf("%s-robot", r.Type)) + err := driver.Send(ctx, api.SendParams{ Header: input.Header, Body: input.Body, MsgKey: input.MsgKey, diff --git a/pkg/scheduler/algorithm/predicates/cloudprovider_schedtag_predicate.go b/pkg/scheduler/algorithm/predicates/cloudprovider_schedtag_predicate.go index b74cce64391..6ad72629137 100644 --- a/pkg/scheduler/algorithm/predicates/cloudprovider_schedtag_predicate.go +++ b/pkg/scheduler/algorithm/predicates/cloudprovider_schedtag_predicate.go @@ -44,7 +44,8 @@ func (p *CloudproviderSchedtagPredicate) Clone() core.FitPredicate { } func (p *CloudproviderSchedtagPredicate) PreExecute(ctx context.Context, u *core.Unit, cs []core.Candidater) (bool, error) { - if !u.GetHypervisorDriver().DoScheduleCloudproviderTagFilter() { + driver := u.GetHypervisorDriver() + if driver != nil && !driver.DoScheduleCloudproviderTagFilter() { return false, nil } return 
p.ServerBaseSchedtagPredicate.PreExecute(ctx, u, cs) diff --git a/pkg/scheduler/algorithm/predicates/disk_schedtag_predicate.go b/pkg/scheduler/algorithm/predicates/disk_schedtag_predicate.go index e6a812af980..7e4adf2e408 100644 --- a/pkg/scheduler/algorithm/predicates/disk_schedtag_predicate.go +++ b/pkg/scheduler/algorithm/predicates/disk_schedtag_predicate.go @@ -129,7 +129,11 @@ func (p *DiskSchedtagPredicate) IsResourceFitInput(ctx context.Context, u *core. } } } - storageTypes := p.GetHypervisorDriver().GetStorageTypes() + storageTypes := []string{} + driver := p.GetHypervisorDriver() + if driver != nil { + storageTypes = driver.GetStorageTypes() + } if len(storageTypes) != 0 && !utils.IsInStringArray(storage.StorageType, storageTypes) { return &FailReason{ fmt.Sprintf("Storage %s storage type %s not in %v", storage.Name, storage.StorageType, storageTypes), @@ -148,7 +152,7 @@ func (p *DiskSchedtagPredicate) IsResourceFitInput(ctx context.Context, u *core. } } - if u.GetHypervisorDriver().DoScheduleStorageFilter() { + if driver != nil && driver.DoScheduleStorageFilter() { // free capacity check isMigrate := len(u.SchedData().HostId) > 0 if !isMigrate || !utils.IsInStringArray(storage.StorageType, computeapi.SHARED_STORAGE) { diff --git a/pkg/scheduler/algorithm/predicates/guest/cpu_predicate.go b/pkg/scheduler/algorithm/predicates/guest/cpu_predicate.go index 069d57ae8c0..bbbe0ddd92d 100644 --- a/pkg/scheduler/algorithm/predicates/guest/cpu_predicate.go +++ b/pkg/scheduler/algorithm/predicates/guest/cpu_predicate.go @@ -37,7 +37,8 @@ func (f *CPUPredicate) Clone() core.FitPredicate { } func (f *CPUPredicate) PreExecute(ctx context.Context, u *core.Unit, cs []core.Candidater) (bool, error) { - if !u.GetHypervisorDriver().DoScheduleCPUFilter() { + driver := u.GetHypervisorDriver() + if driver != nil && !driver.DoScheduleCPUFilter() { return false, nil } diff --git a/pkg/scheduler/algorithm/predicates/guest/image_predicate.go 
b/pkg/scheduler/algorithm/predicates/guest/image_predicate.go index 33d30e075e0..c47bda2cab1 100644 --- a/pkg/scheduler/algorithm/predicates/guest/image_predicate.go +++ b/pkg/scheduler/algorithm/predicates/guest/image_predicate.go @@ -51,7 +51,7 @@ func (f *ImagePredicate) PreExecute(ctx context.Context, u *core.Unit, cs []core if len(imageId) == 0 || u.SchedData().PreferZone != "" { return false, nil } - if !utils.IsInStringArray(u.SchedData().Hypervisor, compute.PUBLIC_CLOUD_HYPERVISORS) && !utils.IsInStringArray(u.SchedData().Hypervisor, compute.PRIVATE_CLOUD_HYPERVISORS) { + if !utils.IsInStringArray(u.SchedData().Provider, compute.PUBLIC_CLOUD_PROVIDERS) && !utils.IsInStringArray(u.SchedData().Provider, compute.PRIVATE_CLOUD_PROVIDERS) { return false, nil } obj, err := models.CachedimageManager.FetchById(imageId) diff --git a/pkg/scheduler/algorithm/predicates/guest/memory_predicate.go b/pkg/scheduler/algorithm/predicates/guest/memory_predicate.go index 2c2cfdffc2c..ffb6f1a64e6 100644 --- a/pkg/scheduler/algorithm/predicates/guest/memory_predicate.go +++ b/pkg/scheduler/algorithm/predicates/guest/memory_predicate.go @@ -37,7 +37,8 @@ func (p *MemoryPredicate) Clone() core.FitPredicate { } func (p *MemoryPredicate) PreExecute(ctx context.Context, u *core.Unit, cs []core.Candidater) (bool, error) { - if !u.GetHypervisorDriver().DoScheduleMemoryFilter() { + driver := u.GetHypervisorDriver() + if driver != nil && !driver.DoScheduleMemoryFilter() { return false, nil } diff --git a/pkg/scheduler/algorithm/predicates/guest/storage_predicate.go b/pkg/scheduler/algorithm/predicates/guest/storage_predicate.go index 5ae34f8078d..1f095617ca8 100644 --- a/pkg/scheduler/algorithm/predicates/guest/storage_predicate.go +++ b/pkg/scheduler/algorithm/predicates/guest/storage_predicate.go @@ -46,7 +46,8 @@ func (p *StoragePredicate) Clone() core.FitPredicate { } func (p *StoragePredicate) PreExecute(ctx context.Context, u *core.Unit, cs []core.Candidater) (bool, error) { - if 
!u.GetHypervisorDriver().DoScheduleStorageFilter() { + driver := u.GetHypervisorDriver() + if driver != nil && !driver.DoScheduleStorageFilter() { return false, nil } return true, nil diff --git a/pkg/scheduler/algorithm/predicates/network_predicate.go b/pkg/scheduler/algorithm/predicates/network_predicate.go index 996d8e80003..e97f1dc22d7 100644 --- a/pkg/scheduler/algorithm/predicates/network_predicate.go +++ b/pkg/scheduler/algorithm/predicates/network_predicate.go @@ -294,7 +294,11 @@ func IsNetworkAvailable( } func (p *NetworkPredicate) GetNetworkTypes(u *core.Unit, specifyType string) []string { - netTypes := p.GetHypervisorDriver(u).GetRandomNetworkTypes() + netTypes := []string{} + driver := p.GetHypervisorDriver(u) + if driver != nil { + netTypes = driver.GetRandomNetworkTypes() + } if len(specifyType) > 0 { netTypes = []string{specifyType} } diff --git a/pkg/scheduler/algorithm/predicates/network_schedtag_predicate.go b/pkg/scheduler/algorithm/predicates/network_schedtag_predicate.go index c341ea769c7..b3bf786fd9b 100644 --- a/pkg/scheduler/algorithm/predicates/network_schedtag_predicate.go +++ b/pkg/scheduler/algorithm/predicates/network_schedtag_predicate.go @@ -103,7 +103,11 @@ func (p *NetworkSchedtagPredicate) IsResourceFitInput(ctx context.Context, u *co } func (p *NetworkSchedtagPredicate) GetNetworkTypes(specifyType string) []string { - netTypes := p.GetHypervisorDriver().GetRandomNetworkTypes() + netTypes := []string{} + driver := p.GetHypervisorDriver() + if driver != nil { + netTypes = driver.GetRandomNetworkTypes() + } if len(specifyType) > 0 { netTypes = []string{specifyType} } diff --git a/pkg/scheduler/algorithm/predicates/predicates.go b/pkg/scheduler/algorithm/predicates/predicates.go index 8990bf52848..8c2fa14354b 100644 --- a/pkg/scheduler/algorithm/predicates/predicates.go +++ b/pkg/scheduler/algorithm/predicates/predicates.go @@ -60,7 +60,8 @@ func (b *BasePredicate) PreExecute(ctx context.Context, unit *core.Unit, candis } func (b 
*BasePredicate) GetHypervisorDriver(u *core.Unit) models.IGuestDriver { - return models.GetDriver(u.GetHypervisor()) + driver, _ := models.GetDriver(u.GetHypervisor(), u.SchedInfo.Provider) + return driver } type PredicateHelper struct { @@ -280,6 +281,7 @@ type BaseSchedtagPredicate struct { CandidateInputResources *CandidateInputResourcesMap Hypervisor string + Provider string } func NewBaseSchedtagPredicate() *BaseSchedtagPredicate { @@ -344,7 +346,8 @@ func (w SchedtagResourceW) GetDynamicSchedDesc() *jsonutils.JSONDict { } func (p *BaseSchedtagPredicate) GetHypervisorDriver() models.IGuestDriver { - return models.GetDriver(p.Hypervisor) + driver, _ := models.GetDriver(p.Hypervisor, p.Provider) + return driver } func (p *BaseSchedtagPredicate) check(input ISchedtagCustomer, candidate ISchedtagCandidateResource, u *core.Unit, c core.Candidater) (*PredicatedSchedtagResource, error) { diff --git a/pkg/scheduler/algorithm/predicates/sku_predicate.go b/pkg/scheduler/algorithm/predicates/sku_predicate.go index 076e5c5e6e1..d3b2e115022 100644 --- a/pkg/scheduler/algorithm/predicates/sku_predicate.go +++ b/pkg/scheduler/algorithm/predicates/sku_predicate.go @@ -35,7 +35,8 @@ func (p *InstanceTypePredicate) Clone() core.FitPredicate { } func (p *InstanceTypePredicate) PreExecute(ctx context.Context, u *core.Unit, cs []core.Candidater) (bool, error) { - if u.SchedData().InstanceType == "" || !u.GetHypervisorDriver().DoScheduleSKUFilter() { + driver := u.GetHypervisorDriver() + if u.SchedData().InstanceType == "" || (driver == nil || !driver.DoScheduleSKUFilter()) { return false, nil } return true, nil diff --git a/pkg/scheduler/api/sched.go b/pkg/scheduler/api/sched.go index 2273250d365..bd4ccf9f2e7 100644 --- a/pkg/scheduler/api/sched.go +++ b/pkg/scheduler/api/sched.go @@ -156,6 +156,14 @@ func NewSchedInfo(input *api.ScheduleInput) *SchedInfo { preferCandidates = append(preferCandidates, data.PreferHost) } + if len(data.PreferRegion) > 0 && len(data.Provider) == 0 { + 
regionObj, _ := models.CloudregionManager.FetchById(data.PreferRegion) + if regionObj != nil { + region := regionObj.(*models.SCloudregion) + data.Provider = region.Provider + } + } + if data.Backup { if data.PreferBackupHost != "" { preferCandidates = append(preferCandidates, data.PreferBackupHost) diff --git a/pkg/scheduler/cache/candidate/base.go b/pkg/scheduler/cache/candidate/base.go index 1246482f060..9a4406e56f0 100644 --- a/pkg/scheduler/cache/candidate/base.go +++ b/pkg/scheduler/cache/candidate/base.go @@ -777,11 +777,15 @@ func (b *BaseHostDesc) fillOnecloudVpcNetworks(netGetter *networkGetter) error { } func (b *BaseHostDesc) GetHypervisorDriver() computemodels.IGuestDriver { - hypervisor := computeapi.HOSTTYPE_HYPERVISOR[b.HostType] - if hypervisor == "" { + if b.Region == nil { return nil } - return computemodels.GetDriver(hypervisor) + hostDriver, _ := computemodels.GetHostDriver(b.HostType, b.Region.Provider) + if hostDriver == nil { + return nil + } + driver, _ := computemodels.GetDriver(hostDriver.GetHypervisor(), b.Region.Provider) + return driver } func (b *BaseHostDesc) fillStorages(host *computemodels.SHost) error { @@ -796,12 +800,9 @@ func (b *BaseHostDesc) fillStorages(host *computemodels.SHost) error { SStorage: &storage, ActualFreeCapacity: storage.Capacity - storage.ActualCapacityUsed, } - if b.GetHypervisorDriver() == nil { + driver := b.GetHypervisorDriver() + if driver == nil || driver.DoScheduleStorageFilter() { cs.FreeCapacity = storage.GetFreeCapacity() - } else { - if b.GetHypervisorDriver().DoScheduleStorageFilter() { - cs.FreeCapacity = storage.GetFreeCapacity() - } } ss = append(ss, cs) } @@ -883,7 +884,10 @@ func (h *BaseHostDesc) getQuotaKeys(s *api.SchedInfo) computemodels.SComputeReso } computeKeys.RegionId = h.Region.Id computeKeys.ZoneId = h.Zone.Id - computeKeys.Hypervisor = computeapi.HOSTTYPE_HYPERVISOR[h.HostType] + driver, _ := computemodels.GetHostDriver(h.HostType, computeKeys.Provider) + if driver != nil { + 
computeKeys.Hypervisor = driver.GetHypervisor() + } return computeKeys } diff --git a/pkg/scheduler/core/context.go b/pkg/scheduler/core/context.go index 85a8e55dc23..02713cb5732 100644 --- a/pkg/scheduler/core/context.go +++ b/pkg/scheduler/core/context.go @@ -23,7 +23,6 @@ import ( "yunion.io/x/log" "yunion.io/x/pkg/tristate" - "yunion.io/x/onecloud/pkg/apis/compute" "yunion.io/x/onecloud/pkg/compute/models" "yunion.io/x/onecloud/pkg/scheduler/api" "yunion.io/x/onecloud/pkg/scheduler/core/score" @@ -448,15 +447,12 @@ func (u *Unit) SchedData() *api.SchedInfo { } func (u *Unit) GetHypervisor() string { - hypervisor := compute.HOSTTYPE_HYPERVISOR[u.SchedData().Hypervisor] - if hypervisor == "" { - hypervisor = u.SchedData().Hypervisor - } - return hypervisor + return u.SchedData().Hypervisor } func (u *Unit) GetHypervisorDriver() models.IGuestDriver { - return models.GetDriver(u.GetHypervisor()) + driver, _ := models.GetDriver(u.GetHypervisor(), u.SchedInfo.Provider) + return driver } func (u *Unit) AppendFailedCandidates(fcs []FailedCandidate) { diff --git a/pkg/scheduler/data_manager/sku/sku.go b/pkg/scheduler/data_manager/sku/sku.go index 7cb0addfbb9..44d50f96753 100644 --- a/pkg/scheduler/data_manager/sku/sku.go +++ b/pkg/scheduler/data_manager/sku/sku.go @@ -69,6 +69,7 @@ type ServerSku struct { Name string `json:"name"` RegionId string `json:"cloudregion_id"` ZoneId string `json:"zone_id"` + Provider string `json:"provider"` } type skuList []*ServerSku @@ -140,7 +141,7 @@ func (m *SSkuManager) syncOnce() { startTime := time.Now() skus := make([]ServerSku, 0) - q := models.ServerSkuManager.Query("id", "name", "cloudregion_id", "zone_id").IsTrue("enabled") + q := models.ServerSkuManager.Query("id", "name", "cloudregion_id", "zone_id", "provider").IsTrue("enabled") q = q.Filter( sqlchemy.OR( sqlchemy.Equals(q.Field("prepaid_status"), computeapi.SkuStatusAvailable), diff --git a/pkg/scheduler/manager/task_queue.go b/pkg/scheduler/manager/task_queue.go index 
58cfeac7bd8..a48912abbb3 100644 --- a/pkg/scheduler/manager/task_queue.go +++ b/pkg/scheduler/manager/task_queue.go @@ -135,7 +135,7 @@ func setSchedPendingUsage(driver computemodels.IGuestDriver, req *api.SchedInfo, } func IsDriverSkipScheduleDirtyMark(driver computemodels.IGuestDriver) bool { - return !(driver.DoScheduleCPUFilter() && driver.DoScheduleMemoryFilter() && driver.DoScheduleStorageFilter()) + return driver == nil || !(driver.DoScheduleCPUFilter() && driver.DoScheduleMemoryFilter() && driver.DoScheduleStorageFilter()) } func (te *TaskExecutor) cleanup() { diff --git a/vendor/modules.txt b/vendor/modules.txt index e6af4a6dc88..4bed85662c3 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1508,7 +1508,7 @@ sigs.k8s.io/structured-merge-diff/v4/value # sigs.k8s.io/yaml v1.2.0 ## explicit; go 1.12 sigs.k8s.io/yaml -# yunion.io/x/cloudmux v0.3.10-0-alpha.1.0.20240428085659-1415194185cf +# yunion.io/x/cloudmux v0.3.10-0-alpha.1.0.20240429100059-f7a31bba5e42 ## explicit; go 1.18 yunion.io/x/cloudmux/pkg/apis yunion.io/x/cloudmux/pkg/apis/billing diff --git a/vendor/yunion.io/x/cloudmux/pkg/cloudprovider/resources.go b/vendor/yunion.io/x/cloudmux/pkg/cloudprovider/resources.go index db788191ff1..27c7923dee0 100644 --- a/vendor/yunion.io/x/cloudmux/pkg/cloudprovider/resources.go +++ b/vendor/yunion.io/x/cloudmux/pkg/cloudprovider/resources.go @@ -284,11 +284,9 @@ type ICloudHost interface { GetIVMs() ([]ICloudVM, error) GetIVMById(id string) (ICloudVM, error) - // GetIWires() ([]ICloudWire, error) GetIStorages() ([]ICloudStorage, error) GetIStorageById(id string) (ICloudStorage, error) - // GetStatus() string // os status GetEnabled() bool // is enabled GetHostStatus() string // service status GetAccessIp() string // @@ -308,6 +306,7 @@ type ICloudHost interface { GetStorageSizeMB() int64 GetStorageType() string GetHostType() string + GetStorageInfo() jsonutils.JSONObject GetIsMaintenance() bool GetVersion() string diff --git 
a/vendor/yunion.io/x/cloudmux/pkg/multicloud/esxi/host.go b/vendor/yunion.io/x/cloudmux/pkg/multicloud/esxi/host.go index 8fa10a932a2..f39ce2dc93a 100644 --- a/vendor/yunion.io/x/cloudmux/pkg/multicloud/esxi/host.go +++ b/vendor/yunion.io/x/cloudmux/pkg/multicloud/esxi/host.go @@ -598,14 +598,10 @@ func (host *SHost) GetMemSizeMB() int { return int(host.getHostSystem().Summary.Hardware.MemorySize / 1024 / 1024) } -func (host *SHost) GetStorageInfo() []SHostStorageInfo { - if host.storageInfo == nil { - host.storageInfo = host.getStorageInfo() - } - return host.storageInfo -} - func (host *SHost) getStorageInfo() []SHostStorageInfo { + if host.storageInfo != nil { + return host.storageInfo + } diskSlots := make(map[int]SHostStorageInfo) list := host.getStorages() for i := 0; i < len(list); i += 1 { @@ -633,7 +629,8 @@ func (host *SHost) getStorageInfo() []SHostStorageInfo { break } } - return disks + host.storageInfo = disks + return host.storageInfo } func (host *SHost) getStorages() []*SHostStorageAdapterInfo { @@ -731,7 +728,7 @@ func (host *SHost) GetStorageSizeMB() int64 { func (host *SHost) GetStorageType() string { ssd := 0 rotate := 0 - storages := host.GetStorageInfo() + storages := host.getStorageInfo() for i := 0; i < len(storages); i += 1 { if storages[i].Rotate { rotate += 1 diff --git a/vendor/yunion.io/x/cloudmux/pkg/multicloud/host_base.go b/vendor/yunion.io/x/cloudmux/pkg/multicloud/host_base.go index 961c40d6ef5..f078524bd71 100644 --- a/vendor/yunion.io/x/cloudmux/pkg/multicloud/host_base.go +++ b/vendor/yunion.io/x/cloudmux/pkg/multicloud/host_base.go @@ -14,7 +14,10 @@ package multicloud -import "yunion.io/x/cloudmux/pkg/apis" +import ( + "yunion.io/x/cloudmux/pkg/apis" + "yunion.io/x/jsonutils" +) type SHostBase struct { SResourceBase @@ -44,3 +47,7 @@ func (host *SHostBase) GetOvnVersion() string { func (host *SHostBase) GetCpuArchitecture() string { return apis.OS_ARCH_X86_64 } + +func (host *SHostBase) GetStorageInfo() jsonutils.JSONObject { 
+ return nil +}