diff --git a/cluster-autoscaler/cloudprovider/azure/azure_agent_pool.go b/cluster-autoscaler/cloudprovider/azure/azure_agent_pool.go
index ed5a42878b15..1b7f1e08291e 100644
--- a/cluster-autoscaler/cloudprovider/azure/azure_agent_pool.go
+++ b/cluster-autoscaler/cloudprovider/azure/azure_agent_pool.go
@@ -24,7 +24,7 @@ import (
"sync"
"time"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources"
azStorage "github.com/Azure/azure-sdk-for-go/storage"
"github.com/Azure/go-autorest/autorest/to"
diff --git a/cluster-autoscaler/cloudprovider/azure/azure_agent_pool_test.go b/cluster-autoscaler/cloudprovider/azure/azure_agent_pool_test.go
index e31ff601bfeb..5a553ec9c2ec 100644
--- a/cluster-autoscaler/cloudprovider/azure/azure_agent_pool_test.go
+++ b/cluster-autoscaler/cloudprovider/azure/azure_agent_pool_test.go
@@ -30,9 +30,9 @@ import (
"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient"
"sigs.k8s.io/cloud-provider-azure/pkg/retry"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources"
- "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage"
+ "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
"github.com/Azure/go-autorest/autorest/date"
"github.com/Azure/go-autorest/autorest/to"
"github.com/golang/mock/gomock"
diff --git a/cluster-autoscaler/cloudprovider/azure/azure_cache.go b/cluster-autoscaler/cloudprovider/azure/azure_cache.go
index 3bb73528a3bb..f6d81838a1f8 100644
--- a/cluster-autoscaler/cloudprovider/azure/azure_cache.go
+++ b/cluster-autoscaler/cloudprovider/azure/azure_cache.go
@@ -24,7 +24,7 @@ import (
"sync"
"time"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/go-autorest/autorest/to"
"github.com/Azure/skewer"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
diff --git a/cluster-autoscaler/cloudprovider/azure/azure_fakes.go b/cluster-autoscaler/cloudprovider/azure/azure_fakes.go
index d3e71378a026..231d24771632 100644
--- a/cluster-autoscaler/cloudprovider/azure/azure_fakes.go
+++ b/cluster-autoscaler/cloudprovider/azure/azure_fakes.go
@@ -22,7 +22,7 @@ import (
"net/http"
"sync"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources"
"github.com/stretchr/testify/mock"
)
diff --git a/cluster-autoscaler/cloudprovider/azure/azure_instance.go b/cluster-autoscaler/cloudprovider/azure/azure_instance.go
index de2f2dc7b422..620fe3025594 100644
--- a/cluster-autoscaler/cloudprovider/azure/azure_instance.go
+++ b/cluster-autoscaler/cloudprovider/azure/azure_instance.go
@@ -19,10 +19,11 @@ package azure
import (
"context"
"fmt"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
- "k8s.io/klog/v2"
"regexp"
"strings"
+
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
+ "k8s.io/klog/v2"
)
// GetVMSSTypeStatically uses static list of vmss generated at azure_instance_types.go to fetch vmss instance information.
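
Note: besides the version bump, this hunk regroups the imports in the standard Go style, standard library first, then a blank line, then external modules, which is what goimports produces. A sketch of the resulting block, reconstructed from the + lines above:

    import (
        "context"
        "fmt"
        "regexp"
        "strings"

        "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
        "k8s.io/klog/v2"
    )

The same regrouping is applied in azure_scale_set_test.go and azure_template.go below.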
diff --git a/cluster-autoscaler/cloudprovider/azure/azure_kubernetes_service_pool_test.go b/cluster-autoscaler/cloudprovider/azure/azure_kubernetes_service_pool_test.go
index 6b1841c99033..355fd995deb8 100644
--- a/cluster-autoscaler/cloudprovider/azure/azure_kubernetes_service_pool_test.go
+++ b/cluster-autoscaler/cloudprovider/azure/azure_kubernetes_service_pool_test.go
@@ -22,7 +22,7 @@ import (
"testing"
"time"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-10-01/containerservice"
"github.com/Azure/go-autorest/autorest/to"
"github.com/golang/mock/gomock"
diff --git a/cluster-autoscaler/cloudprovider/azure/azure_manager_test.go b/cluster-autoscaler/cloudprovider/azure/azure_manager_test.go
index 09396204df39..7ebc552ecc59 100644
--- a/cluster-autoscaler/cloudprovider/azure/azure_manager_test.go
+++ b/cluster-autoscaler/cloudprovider/azure/azure_manager_test.go
@@ -25,7 +25,7 @@ import (
"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources"
"github.com/Azure/go-autorest/autorest/date"
"github.com/Azure/go-autorest/autorest/to"
diff --git a/cluster-autoscaler/cloudprovider/azure/azure_scale_set.go b/cluster-autoscaler/cloudprovider/azure/azure_scale_set.go
index 9b43df1d630e..0e5fd4e56982 100644
--- a/cluster-autoscaler/cloudprovider/azure/azure_scale_set.go
+++ b/cluster-autoscaler/cloudprovider/azure/azure_scale_set.go
@@ -31,7 +31,7 @@ import (
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
"sigs.k8s.io/cloud-provider-azure/pkg/retry"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/go-autorest/autorest/azure"
)
diff --git a/cluster-autoscaler/cloudprovider/azure/azure_scale_set_test.go b/cluster-autoscaler/cloudprovider/azure/azure_scale_set_test.go
index d8ae04ede040..fac5b1a93186 100644
--- a/cluster-autoscaler/cloudprovider/azure/azure_scale_set_test.go
+++ b/cluster-autoscaler/cloudprovider/azure/azure_scale_set_test.go
@@ -18,17 +18,19 @@ package azure
import (
"fmt"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "net/http"
+ "testing"
+
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/go-autorest/autorest/to"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
- "net/http"
+
"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient"
"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient"
- "testing"
)
func newTestScaleSet(manager *AzureManager, name string) *ScaleSet {
diff --git a/cluster-autoscaler/cloudprovider/azure/azure_template.go b/cluster-autoscaler/cloudprovider/azure/azure_template.go
index 12b234cd2918..3e49df3a0c29 100644
--- a/cluster-autoscaler/cloudprovider/azure/azure_template.go
+++ b/cluster-autoscaler/cloudprovider/azure/azure_template.go
@@ -18,7 +18,13 @@ package azure
import (
"fmt"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "math/rand"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -26,11 +32,6 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
cloudvolume "k8s.io/cloud-provider/volume"
"k8s.io/klog/v2"
- "math/rand"
- "regexp"
- "strconv"
- "strings"
- "time"
)
const (
diff --git a/cluster-autoscaler/cloudprovider/azure/azure_util.go b/cluster-autoscaler/cloudprovider/azure/azure_util.go
index bb09b42a483b..08d8c749a88d 100644
--- a/cluster-autoscaler/cloudprovider/azure/azure_util.go
+++ b/cluster-autoscaler/cloudprovider/azure/azure_util.go
@@ -31,7 +31,7 @@ import (
"strings"
"time"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
azStorage "github.com/Azure/azure-sdk-for-go/storage"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/to"
diff --git a/cluster-autoscaler/cloudprovider/azure/azure_util_test.go b/cluster-autoscaler/cloudprovider/azure/azure_util_test.go
index 11ef6c6ec20c..2a5be51d851d 100644
--- a/cluster-autoscaler/cloudprovider/azure/azure_util_test.go
+++ b/cluster-autoscaler/cloudprovider/azure/azure_util_test.go
@@ -22,7 +22,7 @@ import (
"testing"
"time"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/stretchr/testify/assert"
"sigs.k8s.io/cloud-provider-azure/pkg/retry"
diff --git a/cluster-autoscaler/go.mod b/cluster-autoscaler/go.mod
index c63f28750925..e5d767f85e95 100644
--- a/cluster-autoscaler/go.mod
+++ b/cluster-autoscaler/go.mod
@@ -4,9 +4,9 @@ go 1.19
require (
cloud.google.com/go v0.97.0
- github.com/Azure/azure-sdk-for-go v65.0.0+incompatible
- github.com/Azure/go-autorest/autorest v0.11.27
- github.com/Azure/go-autorest/autorest/adal v0.9.20
+ github.com/Azure/azure-sdk-for-go v67.2.0+incompatible
+ github.com/Azure/go-autorest/autorest v0.11.28
+ github.com/Azure/go-autorest/autorest/adal v0.9.21
github.com/Azure/go-autorest/autorest/azure/auth v0.5.8
github.com/Azure/go-autorest/autorest/date v0.3.0
github.com/Azure/go-autorest/autorest/to v0.4.0
@@ -26,9 +26,9 @@ require (
github.com/prometheus/client_golang v1.14.0
github.com/satori/go.uuid v1.2.0
github.com/spf13/pflag v1.0.5
- github.com/stretchr/testify v1.8.0
- golang.org/x/crypto v0.1.0
- golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10
+ github.com/stretchr/testify v1.8.1
+ golang.org/x/crypto v0.5.0
+ golang.org/x/net v0.5.0
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b
google.golang.org/api v0.60.0
google.golang.org/grpc v1.49.0
@@ -45,8 +45,8 @@ require (
k8s.io/klog/v2 v2.80.1
k8s.io/kubernetes v1.26.0
k8s.io/legacy-cloud-providers v0.0.0
- k8s.io/utils v0.0.0-20221107191617-1a15be271d1d
- sigs.k8s.io/cloud-provider-azure v1.24.2
+ k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
+ sigs.k8s.io/cloud-provider-azure v1.26.2
)
require (
@@ -132,9 +132,9 @@ require (
github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021 // indirect
github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
- github.com/spf13/cobra v1.6.0 // indirect
+ github.com/spf13/cobra v1.6.1 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect
- github.com/stretchr/objx v0.4.0 // indirect
+ github.com/stretchr/objx v0.5.0 // indirect
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
github.com/vishvananda/netlink v1.1.0 // indirect
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae // indirect
@@ -158,9 +158,9 @@ require (
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.19.0 // indirect
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
- golang.org/x/sys v0.3.0 // indirect
- golang.org/x/term v0.3.0 // indirect
- golang.org/x/text v0.5.0 // indirect
+ golang.org/x/sys v0.4.0 // indirect
+ golang.org/x/term v0.4.0 // indirect
+ golang.org/x/text v0.6.0 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
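
These go.mod bumps (azure-sdk-for-go v65.0.0 to v67.2.0, go-autorest v0.11.27 to v0.11.28, cloud-provider-azure v1.24.2 to v1.26.2, plus golang.org/x and testify refreshes) drive the go.sum and vendor/ changes that follow. Presumably the tree was regenerated with `go mod tidy` followed by `go mod vendor`, the usual workflow for a vendored module like cluster-autoscaler.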
diff --git a/cluster-autoscaler/go.sum b/cluster-autoscaler/go.sum
index e0faca473abc..a05a17155702 100644
--- a/cluster-autoscaler/go.sum
+++ b/cluster-autoscaler/go.sum
@@ -46,22 +46,22 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go v46.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw=
-github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v67.2.0+incompatible h1:Uu/Ww6ernvPTrpq31kITVTIm/I5jlJ1wjtEH/bmSB2k=
+github.com/Azure/azure-sdk-for-go v67.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.4/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
-github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A=
-github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U=
+github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM=
+github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJv2v0KACFHWTB2drffI1B68Pk=
github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
-github.com/Azure/go-autorest/autorest/adal v0.9.20 h1:gJ3E98kMpFB1MFqQCvA1yFab8vthOeD4VlFRQULxahg=
-github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
+github.com/Azure/go-autorest/autorest/adal v0.9.21 h1:jjQnVFXPfekaqb8vIsv2G1lxshoW+oGv4MDlhRtnYZk=
+github.com/Azure/go-autorest/autorest/adal v0.9.21/go.mod h1:zua7mBUaCc5YnSLKYgGJR/w5ePdMDA6H56upLsHzA9U=
github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 h1:TzPg6B6fTZ0G1zBf3T54aI7p3cAT6u//TOXGPmFMOXg=
github.com/Azure/go-autorest/autorest/azure/auth v0.5.8/go.mod h1:kxyKZTSfKh8OVFWPAgOgQ/frrJgeYQJPyR5fLFmXko4=
github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 h1:dMOmEJfkLKW/7JsokJqkyoYSgmR08hi9KrhjZb+JALY=
@@ -475,9 +475,8 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
-github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs=
-github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys=
+github.com/onsi/ginkgo/v2 v2.6.1 h1:1xQPCjcqYw/J5LchOcp4/2q/jzJFjiAOc25chhnDw+Q=
+github.com/onsi/gomega v1.24.2 h1:J/tulyYK6JwBldPViHJReihxxZ+22FHs0piGjQAvoUE=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
@@ -557,8 +556,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI=
-github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
+github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
+github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
@@ -568,8 +567,9 @@ github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ai
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -577,8 +577,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@@ -621,7 +622,6 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
-go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0=
go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.35.0 h1:KQjX0qQ8H21oBUAvFp4ZLKJMMLIluONvSPDAFIGmX58=
go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.35.0/go.mod h1:DQYkU9srMFqLUTVA/7/WlRHdnYDB7wyMMlle2ktMjfI=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw=
@@ -668,9 +668,9 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
-golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
+golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -750,8 +750,8 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 h1:Frnccbp+ok2GkUS2tC84yAq/U9Vg+0sIO7aRL3T4Xnc=
-golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
+golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -857,12 +857,12 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
-golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
+golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
-golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
+golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg=
+golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -872,8 +872,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
-golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
+golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1172,15 +1172,15 @@ k8s.io/legacy-cloud-providers v0.26.0/go.mod h1:dOOgYhHiMNWNla/XyM4Ppgjcrn3HulGa
k8s.io/mount-utils v0.26.1-rc.0 h1:v7GKm3S5IdmcZvd7gM0QtANdVJRIPpycvgiT/o9y85I=
k8s.io/mount-utils v0.26.1-rc.0/go.mod h1:au99w4FWU5ZWelLb3Yx6kJc8RZ387IyWVM9tN65Yhxo=
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20221107191617-1a15be271d1d h1:0Smp/HP1OH4Rvhe+4B8nWGERtlqAGSftbSbbmm45oFs=
-k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y=
+k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33 h1:LYqFq+6Cj2D0gFfrJvL7iElD4ET6ir3VDdhDdTK7rgc=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33/go.mod h1:soWkSNf2tZC7aMibXEqVhCd73GOY5fJikn8qbdzemB0=
-sigs.k8s.io/cloud-provider-azure v1.24.2 h1:t0c3Q7GAGQ0oqyl/KiHLtkS4obEYJpAMRYUuhEtgs/k=
-sigs.k8s.io/cloud-provider-azure v1.24.2/go.mod h1:uKqonMQbC2zqwq7NIWOfQLgrsMzD02Wj5UFFl1te1GY=
+sigs.k8s.io/cloud-provider-azure v1.26.2 h1://Yr95O53fcY/sakPvXdekCW4o2QKhfs1kNtipR7LpE=
+sigs.k8s.io/cloud-provider-azure v1.26.2/go.mod h1:9m8BqB9ubr94uWWgbIY8TyUmHhsE2UEKdAZZG8O/ymc=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/client.go
index 1812f27feb99..c7c4543154d5 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/client.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/client.go
@@ -1,3 +1,5 @@
+// Deprecated: Please note, this package has been deprecated. A replacement package is available [github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute). We strongly encourage you to upgrade to continue receiving updates. See [Migration Guide](https://aka.ms/azsdk/golang/t2/migration) for guidance on upgrading. Refer to our [deprecation policy](https://azure.github.io/azure-sdk/policies_support.html) for more details.
+//
// Package compute implements the Azure ARM Compute service API version .
//
// Compute Client
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute/client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute/client.go
index 1812f27feb99..c7c4543154d5 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute/client.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute/client.go
@@ -1,3 +1,5 @@
+// Deprecated: Please note, this package has been deprecated. A replacement package is available [github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute). We strongly encourage you to upgrade to continue receiving updates. See [Migration Guide](https://aka.ms/azsdk/golang/t2/migration) for guidance on upgrading. Refer to our [deprecation policy](https://azure.github.io/azure-sdk/policies_support.html) for more details.
+//
// Package compute implements the Azure ARM Compute service API version .
//
// Compute Client
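
The deprecation header added to these track-1 packages points at the track-2 replacement. A consumer migrating off the deprecated SDK would switch to an import along these lines (a sketch only; the call surface differs between tracks):

    import "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"

This change stays on the track-1 mgmt packages and only moves to a newer service API version.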
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/_meta.json b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/_meta.json
deleted file mode 100644
index ee881857d1cc..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/_meta.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "commit": "bd64220293a403f70ae8beebd56fb86951007acf",
- "readme": "/_/azure-rest-api-specs/specification/compute/resource-manager/readme.md",
- "tag": "package-2021-07-01",
- "use": "@microsoft.azure/autorest.go@2.1.187",
- "repository_url": "https://github.com/Azure/azure-rest-api-specs.git",
- "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-2021-07-01 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix /_/azure-rest-api-specs/specification/compute/resource-manager/readme.md",
- "additional_properties": {
- "additional_options": "--go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix"
- }
-}
\ No newline at end of file
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/CHANGELOG.md b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/CHANGELOG.md
similarity index 100%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/CHANGELOG.md
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/CHANGELOG.md
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/_meta.json b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/_meta.json
new file mode 100644
index 000000000000..b93e50bd72d8
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/_meta.json
@@ -0,0 +1,11 @@
+{
+ "commit": "1c8d7850afbec9ede6de6f2d14bcc30896a74ed6",
+ "readme": "/_/azure-rest-api-specs/specification/compute/resource-manager/readme.md",
+ "tag": "package-2022-03-01",
+ "use": "@microsoft.azure/autorest.go@2.1.188",
+ "repository_url": "https://github.com/Azure/azure-rest-api-specs.git",
+ "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.188 --tag=package-2022-03-01 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/compute/resource-manager/readme.md",
+ "additional_properties": {
+ "additional_options": "--go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION"
+ }
+}
\ No newline at end of file
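
_meta.json records the azure-rest-api-specs commit and the exact autorest invocation used to generate the package, so the 2022-03-01 metadata above replaces the deleted 2021-07-01 file one-for-one. The `\ No newline at end of file` marker is expected: the generator emits the JSON without a trailing newline.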
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/availabilitysets.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/availabilitysets.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/availabilitysets.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/availabilitysets.go
index 7953c6555084..ea14bbbb511e 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/availabilitysets.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/availabilitysets.go
@@ -77,7 +77,7 @@ func (client AvailabilitySetsClient) CreateOrUpdatePreparer(ctx context.Context,
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -155,7 +155,7 @@ func (client AvailabilitySetsClient) DeletePreparer(ctx context.Context, resourc
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -230,7 +230,7 @@ func (client AvailabilitySetsClient) GetPreparer(ctx context.Context, resourceGr
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -309,7 +309,7 @@ func (client AvailabilitySetsClient) ListPreparer(ctx context.Context, resourceG
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -423,7 +423,7 @@ func (client AvailabilitySetsClient) ListAvailableSizesPreparer(ctx context.Cont
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -501,7 +501,7 @@ func (client AvailabilitySetsClient) ListBySubscriptionPreparer(ctx context.Cont
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -618,7 +618,7 @@ func (client AvailabilitySetsClient) UpdatePreparer(ctx context.Context, resourc
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
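
Each Preparer pins the service API version as a query parameter, so the only functional change in these renamed files is the version string sent on the wire, e.g. GET .../availabilitySets/{name}?api-version=2022-03-01 instead of ...?api-version=2021-07-01 (and 2021-12-01 instead of 2021-04-01 for the disk clients). The same one-line substitution repeats across the compute clients below.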
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/capacityreservationgroups.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/capacityreservationgroups.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/capacityreservationgroups.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/capacityreservationgroups.go
index a525a29711a9..e73d47ddeb97 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/capacityreservationgroups.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/capacityreservationgroups.go
@@ -78,7 +78,7 @@ func (client CapacityReservationGroupsClient) CreateOrUpdatePreparer(ctx context
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -158,7 +158,7 @@ func (client CapacityReservationGroupsClient) DeletePreparer(ctx context.Context
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -237,7 +237,7 @@ func (client CapacityReservationGroupsClient) GetPreparer(ctx context.Context, r
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -323,7 +323,7 @@ func (client CapacityReservationGroupsClient) ListByResourceGroupPreparer(ctx co
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -444,7 +444,7 @@ func (client CapacityReservationGroupsClient) ListBySubscriptionPreparer(ctx con
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -562,7 +562,7 @@ func (client CapacityReservationGroupsClient) UpdatePreparer(ctx context.Context
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/capacityreservations.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/capacityreservations.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/capacityreservations.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/capacityreservations.go
index fc894216ae13..c2599c46262e 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/capacityreservations.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/capacityreservations.go
@@ -80,7 +80,7 @@ func (client CapacityReservationsClient) CreateOrUpdatePreparer(ctx context.Cont
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -165,7 +165,7 @@ func (client CapacityReservationsClient) DeletePreparer(ctx context.Context, res
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -255,7 +255,7 @@ func (client CapacityReservationsClient) GetPreparer(ctx context.Context, resour
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -340,7 +340,7 @@ func (client CapacityReservationsClient) ListByCapacityReservationGroupPreparer(
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -449,7 +449,7 @@ func (client CapacityReservationsClient) UpdatePreparer(ctx context.Context, res
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/client.go
similarity index 71%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/client.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/client.go
index 1812f27feb99..c7c4543154d5 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/client.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/client.go
@@ -1,3 +1,5 @@
+// Deprecated: Please note, this package has been deprecated. A replacement package is available [github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute). We strongly encourage you to upgrade to continue receiving updates. See [Migration Guide](https://aka.ms/azsdk/golang/t2/migration) for guidance on upgrading. Refer to our [deprecation policy](https://azure.github.io/azure-sdk/policies_support.html) for more details.
+//
// Package compute implements the Azure ARM Compute service API version .
//
// Compute Client
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudserviceoperatingsystems.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/cloudserviceoperatingsystems.go
similarity index 100%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudserviceoperatingsystems.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/cloudserviceoperatingsystems.go
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudserviceroleinstances.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/cloudserviceroleinstances.go
similarity index 100%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudserviceroleinstances.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/cloudserviceroleinstances.go
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudserviceroles.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/cloudserviceroles.go
similarity index 100%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudserviceroles.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/cloudserviceroles.go
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudservices.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/cloudservices.go
similarity index 100%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudservices.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/cloudservices.go
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudservicesupdatedomain.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/cloudservicesupdatedomain.go
similarity index 100%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudservicesupdatedomain.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/cloudservicesupdatedomain.go
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/communitygalleries.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/communitygalleries.go
similarity index 100%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/communitygalleries.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/communitygalleries.go
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/communitygalleryimages.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/communitygalleryimages.go
similarity index 100%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/communitygalleryimages.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/communitygalleryimages.go
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/communitygalleryimageversions.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/communitygalleryimageversions.go
similarity index 100%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/communitygalleryimageversions.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/communitygalleryimageversions.go
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/dedicatedhostgroups.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/dedicatedhostgroups.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/dedicatedhostgroups.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/dedicatedhostgroups.go
index 67ace3df4d8e..0b7ea6ca3f59 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/dedicatedhostgroups.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/dedicatedhostgroups.go
@@ -88,7 +88,7 @@ func (client DedicatedHostGroupsClient) CreateOrUpdatePreparer(ctx context.Conte
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -166,7 +166,7 @@ func (client DedicatedHostGroupsClient) DeletePreparer(ctx context.Context, reso
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -244,7 +244,7 @@ func (client DedicatedHostGroupsClient) GetPreparer(ctx context.Context, resourc
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -327,7 +327,7 @@ func (client DedicatedHostGroupsClient) ListByResourceGroupPreparer(ctx context.
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -441,7 +441,7 @@ func (client DedicatedHostGroupsClient) ListBySubscriptionPreparer(ctx context.C
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -555,7 +555,7 @@ func (client DedicatedHostGroupsClient) UpdatePreparer(ctx context.Context, reso
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/dedicatedhosts.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/dedicatedhosts.go
similarity index 84%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/dedicatedhosts.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/dedicatedhosts.go
index a58e6b2b84bb..67a3daf017b5 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/dedicatedhosts.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/dedicatedhosts.go
@@ -82,7 +82,7 @@ func (client DedicatedHostsClient) CreateOrUpdatePreparer(ctx context.Context, r
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -165,7 +165,7 @@ func (client DedicatedHostsClient) DeletePreparer(ctx context.Context, resourceG
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -254,7 +254,7 @@ func (client DedicatedHostsClient) GetPreparer(ctx context.Context, resourceGrou
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -339,7 +339,7 @@ func (client DedicatedHostsClient) ListByHostGroupPreparer(ctx context.Context,
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -407,6 +407,89 @@ func (client DedicatedHostsClient) ListByHostGroupComplete(ctx context.Context,
return
}
+// Restart restart the dedicated host. The operation will complete successfully once the dedicated host has restarted
+// and is running. To determine the health of VMs deployed on the dedicated host after the restart check the Resource
+// Health Center in the Azure Portal. Please refer to
+// https://docs.microsoft.com/azure/service-health/resource-health-overview for more details.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// hostGroupName - the name of the dedicated host group.
+// hostName - the name of the dedicated host.
+func (client DedicatedHostsClient) Restart(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string) (result DedicatedHostsRestartFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostsClient.Restart")
+ defer func() {
+ sc := -1
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.RestartPreparer(ctx, resourceGroupName, hostGroupName, hostName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.DedicatedHostsClient", "Restart", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.RestartSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.DedicatedHostsClient", "Restart", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// RestartPreparer prepares the Restart request.
+func (client DedicatedHostsClient) RestartPreparer(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "hostGroupName": autorest.Encode("path", hostGroupName),
+ "hostName": autorest.Encode("path", hostName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2022-03-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}/restart", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// RestartSender sends the Restart request. The method will close the
+// http.Response Body if it receives an error.
+func (client DedicatedHostsClient) RestartSender(req *http.Request) (future DedicatedHostsRestartFuture, err error) {
+ var resp *http.Response
+ future.FutureAPI = &azure.Future{}
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
+ if err != nil {
+ return
+ }
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
+ return
+}
+
+// RestartResponder handles the response to the Restart request. The method always
+// closes the http.Response Body.
+func (client DedicatedHostsClient) RestartResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
// Update update an dedicated host .
// Parameters:
// resourceGroupName - the name of the resource group.
@@ -448,7 +531,7 @@ func (client DedicatedHostsClient) UpdatePreparer(ctx context.Context, resourceG
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
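
Beyond the version bump, the 2022-03-01 package adds a Restart long-running operation to DedicatedHostsClient, shown in the hunk above. A minimal caller sketch, assuming an already-configured client and made-up resource names (the Future plus WaitForCompletionRef pattern is the standard track-1 LRO flow):

    // Restart a dedicated host and block until the long-running operation completes.
    // Resource names here are placeholders.
    future, err := client.Restart(ctx, "myResourceGroup", "myHostGroup", "myHost")
    if err != nil {
        return err
    }
    // WaitForCompletionRef polls the returned future until it reaches a terminal state.
    if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
        return err
    }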
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskaccesses.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/diskaccesses.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskaccesses.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/diskaccesses.go
index 618db0d550a5..0026f2946e72 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskaccesses.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/diskaccesses.go
@@ -72,7 +72,7 @@ func (client DiskAccessesClient) CreateOrUpdatePreparer(ctx context.Context, res
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -155,7 +155,7 @@ func (client DiskAccessesClient) DeletePreparer(ctx context.Context, resourceGro
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -237,7 +237,7 @@ func (client DiskAccessesClient) DeleteAPrivateEndpointConnectionPreparer(ctx co
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -324,7 +324,7 @@ func (client DiskAccessesClient) GetPreparer(ctx context.Context, resourceGroupN
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -404,7 +404,7 @@ func (client DiskAccessesClient) GetAPrivateEndpointConnectionPreparer(ctx conte
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -482,7 +482,7 @@ func (client DiskAccessesClient) GetPrivateLinkResourcesPreparer(ctx context.Con
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -558,7 +558,7 @@ func (client DiskAccessesClient) ListPreparer(ctx context.Context) (*http.Reques
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -674,7 +674,7 @@ func (client DiskAccessesClient) ListByResourceGroupPreparer(ctx context.Context
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -794,7 +794,7 @@ func (client DiskAccessesClient) ListPrivateEndpointConnectionsPreparer(ctx cont
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -903,7 +903,7 @@ func (client DiskAccessesClient) UpdatePreparer(ctx context.Context, resourceGro
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -998,7 +998,7 @@ func (client DiskAccessesClient) UpdateAPrivateEndpointConnectionPreparer(ctx co
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
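Reviewer note: the same mechanical api-version bump from 2021-04-01 to 2021-12-01 repeats through the disk-related clients that follow (disk encryption sets, disk restore points, disks). In every generated Preparer the constant only ever becomes a query parameter on the request; a sketch of the pattern, with a hypothetical list path, shows where the value ends up:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func prepareListDisks(ctx context.Context, baseURI, subscriptionID string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"subscriptionId": autorest.Encode("path", subscriptionID),
	}
	// Each generated client pins the service version per package release.
	const APIVersion = "2021-12-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}
	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(baseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

func main() {
	req, err := prepareListDisks(context.Background(), "https://management.azure.com", "00000000-0000-0000-0000-000000000000")
	if err != nil {
		panic(err)
	}
	fmt.Println(req.URL) // ...?api-version=2021-12-01
}
```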
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskencryptionsets.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/diskencryptionsets.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskencryptionsets.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/diskencryptionsets.go
index af979a6e8909..ee62e9aa74ef 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskencryptionsets.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/diskencryptionsets.go
@@ -83,7 +83,7 @@ func (client DiskEncryptionSetsClient) CreateOrUpdatePreparer(ctx context.Contex
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -166,7 +166,7 @@ func (client DiskEncryptionSetsClient) DeletePreparer(ctx context.Context, resou
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -253,7 +253,7 @@ func (client DiskEncryptionSetsClient) GetPreparer(ctx context.Context, resource
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -329,7 +329,7 @@ func (client DiskEncryptionSetsClient) ListPreparer(ctx context.Context) (*http.
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -449,7 +449,7 @@ func (client DiskEncryptionSetsClient) ListAssociatedResourcesPreparer(ctx conte
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -565,7 +565,7 @@ func (client DiskEncryptionSetsClient) ListByResourceGroupPreparer(ctx context.C
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -675,7 +675,7 @@ func (client DiskEncryptionSetsClient) UpdatePreparer(ctx context.Context, resou
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskrestorepoint.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/diskrestorepoint.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskrestorepoint.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/diskrestorepoint.go
index d1fe4a66bc2e..dcc708c62850 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskrestorepoint.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/diskrestorepoint.go
@@ -81,7 +81,7 @@ func (client DiskRestorePointClient) GetPreparer(ctx context.Context, resourceGr
"vmRestorePointName": autorest.Encode("path", VMRestorePointName),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -161,7 +161,7 @@ func (client DiskRestorePointClient) GrantAccessPreparer(ctx context.Context, re
"vmRestorePointName": autorest.Encode("path", VMRestorePointName),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -256,7 +256,7 @@ func (client DiskRestorePointClient) ListByRestorePointPreparer(ctx context.Cont
"vmRestorePointName": autorest.Encode("path", VMRestorePointName),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -366,7 +366,7 @@ func (client DiskRestorePointClient) RevokeAccessPreparer(ctx context.Context, r
"vmRestorePointName": autorest.Encode("path", VMRestorePointName),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/disks.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/disks.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/disks.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/disks.go
index 4aec8ae542ff..bef84adc4f22 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/disks.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/disks.go
@@ -92,7 +92,7 @@ func (client DisksClient) CreateOrUpdatePreparer(ctx context.Context, resourceGr
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -177,7 +177,7 @@ func (client DisksClient) DeletePreparer(ctx context.Context, resourceGroupName
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -264,7 +264,7 @@ func (client DisksClient) GetPreparer(ctx context.Context, resourceGroupName str
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -342,7 +342,7 @@ func (client DisksClient) GrantAccessPreparer(ctx context.Context, resourceGroup
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -430,7 +430,7 @@ func (client DisksClient) ListPreparer(ctx context.Context) (*http.Request, erro
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -546,7 +546,7 @@ func (client DisksClient) ListByResourceGroupPreparer(ctx context.Context, resou
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -654,7 +654,7 @@ func (client DisksClient) RevokeAccessPreparer(ctx context.Context, resourceGrou
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -735,7 +735,7 @@ func (client DisksClient) UpdatePreparer(ctx context.Context, resourceGroupName
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/enums.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/enums.go
similarity index 53%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/enums.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/enums.go
index 1edd7683383f..15b22d526a93 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/enums.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/enums.go
@@ -10,51 +10,81 @@ package compute
type AccessLevel string
const (
- // AccessLevelNone ...
- AccessLevelNone AccessLevel = "None"
- // AccessLevelRead ...
- AccessLevelRead AccessLevel = "Read"
- // AccessLevelWrite ...
- AccessLevelWrite AccessLevel = "Write"
+ // None ...
+ None AccessLevel = "None"
+ // Read ...
+ Read AccessLevel = "Read"
+ // Write ...
+ Write AccessLevel = "Write"
)
// PossibleAccessLevelValues returns an array of possible values for the AccessLevel const type.
func PossibleAccessLevelValues() []AccessLevel {
- return []AccessLevel{AccessLevelNone, AccessLevelRead, AccessLevelWrite}
+ return []AccessLevel{None, Read, Write}
}
// AggregatedReplicationState enumerates the values for aggregated replication state.
type AggregatedReplicationState string
const (
- // AggregatedReplicationStateCompleted ...
- AggregatedReplicationStateCompleted AggregatedReplicationState = "Completed"
- // AggregatedReplicationStateFailed ...
- AggregatedReplicationStateFailed AggregatedReplicationState = "Failed"
- // AggregatedReplicationStateInProgress ...
- AggregatedReplicationStateInProgress AggregatedReplicationState = "InProgress"
- // AggregatedReplicationStateUnknown ...
- AggregatedReplicationStateUnknown AggregatedReplicationState = "Unknown"
+ // Completed ...
+ Completed AggregatedReplicationState = "Completed"
+ // Failed ...
+ Failed AggregatedReplicationState = "Failed"
+ // InProgress ...
+ InProgress AggregatedReplicationState = "InProgress"
+ // Unknown ...
+ Unknown AggregatedReplicationState = "Unknown"
)
// PossibleAggregatedReplicationStateValues returns an array of possible values for the AggregatedReplicationState const type.
func PossibleAggregatedReplicationStateValues() []AggregatedReplicationState {
- return []AggregatedReplicationState{AggregatedReplicationStateCompleted, AggregatedReplicationStateFailed, AggregatedReplicationStateInProgress, AggregatedReplicationStateUnknown}
+ return []AggregatedReplicationState{Completed, Failed, InProgress, Unknown}
+}
+
+// Architecture enumerates the values for architecture.
+type Architecture string
+
+const (
+ // Arm64 ...
+ Arm64 Architecture = "Arm64"
+ // X64 ...
+ X64 Architecture = "x64"
+)
+
+// PossibleArchitectureValues returns an array of possible values for the Architecture const type.
+func PossibleArchitectureValues() []Architecture {
+ return []Architecture{Arm64, X64}
+}
+
+// ArchitectureTypes enumerates the values for architecture types.
+type ArchitectureTypes string
+
+const (
+ // ArchitectureTypesArm64 ...
+ ArchitectureTypesArm64 ArchitectureTypes = "Arm64"
+ // ArchitectureTypesX64 ...
+ ArchitectureTypesX64 ArchitectureTypes = "x64"
+)
+
+// PossibleArchitectureTypesValues returns an array of possible values for the ArchitectureTypes const type.
+func PossibleArchitectureTypesValues() []ArchitectureTypes {
+ return []ArchitectureTypes{ArchitectureTypesArm64, ArchitectureTypesX64}
}
// AvailabilitySetSkuTypes enumerates the values for availability set sku types.
type AvailabilitySetSkuTypes string
const (
- // AvailabilitySetSkuTypesAligned ...
- AvailabilitySetSkuTypesAligned AvailabilitySetSkuTypes = "Aligned"
- // AvailabilitySetSkuTypesClassic ...
- AvailabilitySetSkuTypesClassic AvailabilitySetSkuTypes = "Classic"
+ // Aligned ...
+ Aligned AvailabilitySetSkuTypes = "Aligned"
+ // Classic ...
+ Classic AvailabilitySetSkuTypes = "Classic"
)
// PossibleAvailabilitySetSkuTypesValues returns an array of possible values for the AvailabilitySetSkuTypes const type.
func PossibleAvailabilitySetSkuTypesValues() []AvailabilitySetSkuTypes {
- return []AvailabilitySetSkuTypes{AvailabilitySetSkuTypesAligned, AvailabilitySetSkuTypesClassic}
+ return []AvailabilitySetSkuTypes{Aligned, Classic}
}
// CachingTypes enumerates the values for caching types.
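Reviewer note: note the direction of this rename. The 2022-03-01 generation drops the type-name prefix from enum constants (AccessLevelNone becomes None, AvailabilitySetSkuTypesAligned becomes Aligned, and so on), which is a source-breaking change for any code importing this package, though the wire values are unchanged. A small before/after sketch of the migration callers need:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
)

func main() {
	// 2021-07-01 spelled this compute.AccessLevelRead; 2022-03-01 exports
	// the bare name. The underlying string value "Read" is unchanged.
	level := compute.Read
	fmt.Println(level)                               // Read
	fmt.Println(compute.PossibleAccessLevelValues()) // [None Read Write]
}
```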
@@ -79,13 +109,13 @@ func PossibleCachingTypesValues() []CachingTypes {
type CapacityReservationGroupInstanceViewTypes string
const (
- // CapacityReservationGroupInstanceViewTypesInstanceView ...
- CapacityReservationGroupInstanceViewTypesInstanceView CapacityReservationGroupInstanceViewTypes = "instanceView"
+ // InstanceView ...
+ InstanceView CapacityReservationGroupInstanceViewTypes = "instanceView"
)
// PossibleCapacityReservationGroupInstanceViewTypesValues returns an array of possible values for the CapacityReservationGroupInstanceViewTypes const type.
func PossibleCapacityReservationGroupInstanceViewTypesValues() []CapacityReservationGroupInstanceViewTypes {
- return []CapacityReservationGroupInstanceViewTypes{CapacityReservationGroupInstanceViewTypesInstanceView}
+ return []CapacityReservationGroupInstanceViewTypes{InstanceView}
}
// CapacityReservationInstanceViewTypes enumerates the values for capacity reservation instance view types.
@@ -105,47 +135,81 @@ func PossibleCapacityReservationInstanceViewTypesValues() []CapacityReservationI
type CloudServiceUpgradeMode string
const (
- // CloudServiceUpgradeModeAuto ...
- CloudServiceUpgradeModeAuto CloudServiceUpgradeMode = "Auto"
- // CloudServiceUpgradeModeManual ...
- CloudServiceUpgradeModeManual CloudServiceUpgradeMode = "Manual"
- // CloudServiceUpgradeModeSimultaneous ...
- CloudServiceUpgradeModeSimultaneous CloudServiceUpgradeMode = "Simultaneous"
+ // Auto ...
+ Auto CloudServiceUpgradeMode = "Auto"
+ // Manual ...
+ Manual CloudServiceUpgradeMode = "Manual"
+ // Simultaneous ...
+ Simultaneous CloudServiceUpgradeMode = "Simultaneous"
)
// PossibleCloudServiceUpgradeModeValues returns an array of possible values for the CloudServiceUpgradeMode const type.
func PossibleCloudServiceUpgradeModeValues() []CloudServiceUpgradeMode {
- return []CloudServiceUpgradeMode{CloudServiceUpgradeModeAuto, CloudServiceUpgradeModeManual, CloudServiceUpgradeModeSimultaneous}
+ return []CloudServiceUpgradeMode{Auto, Manual, Simultaneous}
}
// ComponentNames enumerates the values for component names.
type ComponentNames string
const (
- // ComponentNamesMicrosoftWindowsShellSetup ...
- ComponentNamesMicrosoftWindowsShellSetup ComponentNames = "Microsoft-Windows-Shell-Setup"
+ // MicrosoftWindowsShellSetup ...
+ MicrosoftWindowsShellSetup ComponentNames = "Microsoft-Windows-Shell-Setup"
)
// PossibleComponentNamesValues returns an array of possible values for the ComponentNames const type.
func PossibleComponentNamesValues() []ComponentNames {
- return []ComponentNames{ComponentNamesMicrosoftWindowsShellSetup}
+ return []ComponentNames{MicrosoftWindowsShellSetup}
+}
+
+// ConfidentialVMEncryptionType enumerates the values for confidential vm encryption type.
+type ConfidentialVMEncryptionType string
+
+const (
+ // EncryptedVMGuestStateOnlyWithPmk ...
+ EncryptedVMGuestStateOnlyWithPmk ConfidentialVMEncryptionType = "EncryptedVMGuestStateOnlyWithPmk"
+ // EncryptedWithCmk ...
+ EncryptedWithCmk ConfidentialVMEncryptionType = "EncryptedWithCmk"
+ // EncryptedWithPmk ...
+ EncryptedWithPmk ConfidentialVMEncryptionType = "EncryptedWithPmk"
+)
+
+// PossibleConfidentialVMEncryptionTypeValues returns an array of possible values for the ConfidentialVMEncryptionType const type.
+func PossibleConfidentialVMEncryptionTypeValues() []ConfidentialVMEncryptionType {
+ return []ConfidentialVMEncryptionType{EncryptedVMGuestStateOnlyWithPmk, EncryptedWithCmk, EncryptedWithPmk}
}
// ConsistencyModeTypes enumerates the values for consistency mode types.
type ConsistencyModeTypes string
const (
- // ConsistencyModeTypesApplicationConsistent ...
- ConsistencyModeTypesApplicationConsistent ConsistencyModeTypes = "ApplicationConsistent"
- // ConsistencyModeTypesCrashConsistent ...
- ConsistencyModeTypesCrashConsistent ConsistencyModeTypes = "CrashConsistent"
- // ConsistencyModeTypesFileSystemConsistent ...
- ConsistencyModeTypesFileSystemConsistent ConsistencyModeTypes = "FileSystemConsistent"
+ // ApplicationConsistent ...
+ ApplicationConsistent ConsistencyModeTypes = "ApplicationConsistent"
+ // CrashConsistent ...
+ CrashConsistent ConsistencyModeTypes = "CrashConsistent"
+ // FileSystemConsistent ...
+ FileSystemConsistent ConsistencyModeTypes = "FileSystemConsistent"
)
// PossibleConsistencyModeTypesValues returns an array of possible values for the ConsistencyModeTypes const type.
func PossibleConsistencyModeTypesValues() []ConsistencyModeTypes {
- return []ConsistencyModeTypes{ConsistencyModeTypesApplicationConsistent, ConsistencyModeTypesCrashConsistent, ConsistencyModeTypesFileSystemConsistent}
+ return []ConsistencyModeTypes{ApplicationConsistent, CrashConsistent, FileSystemConsistent}
+}
+
+// DataAccessAuthMode enumerates the values for data access auth mode.
+type DataAccessAuthMode string
+
+const (
+ // DataAccessAuthModeAzureActiveDirectory When export/upload URL is used, the system checks if the user has
+ // an identity in Azure Active Directory and has necessary permissions to export/upload the data. Please
+ // refer to aka.ms/DisksAzureADAuth.
+ DataAccessAuthModeAzureActiveDirectory DataAccessAuthMode = "AzureActiveDirectory"
+ // DataAccessAuthModeNone No additional authentication would be performed when accessing export/upload URL.
+ DataAccessAuthModeNone DataAccessAuthMode = "None"
+)
+
+// PossibleDataAccessAuthModeValues returns an array of possible values for the DataAccessAuthMode const type.
+func PossibleDataAccessAuthModeValues() []DataAccessAuthMode {
+ return []DataAccessAuthMode{DataAccessAuthModeAzureActiveDirectory, DataAccessAuthModeNone}
}
// DedicatedHostLicenseTypes enumerates the values for dedicated host license types.
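Reviewer note: DataAccessAuthMode is new surface from the 2021-12-01 disk API and gates who can use a disk export/upload SAS URL. A sketch of setting it on a disk update, assuming DiskUpdateProperties in this package carries the field as the 2021-12-01 swagger defines; verify against the vendored models before relying on it:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
)

func main() {
	// Require an Azure AD identity check before export/upload URLs work.
	// Field placement per the 2021-12-01 disk swagger (assumption).
	update := compute.DiskUpdate{
		DiskUpdateProperties: &compute.DiskUpdateProperties{
			DataAccessAuthMode: compute.DataAccessAuthModeAzureActiveDirectory,
		},
	}
	fmt.Println(update.DiskUpdateProperties.DataAccessAuthMode)
}
```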
@@ -169,75 +233,82 @@ func PossibleDedicatedHostLicenseTypesValues() []DedicatedHostLicenseTypes {
type DeleteOptions string
const (
- // DeleteOptionsDelete ...
- DeleteOptionsDelete DeleteOptions = "Delete"
- // DeleteOptionsDetach ...
- DeleteOptionsDetach DeleteOptions = "Detach"
+ // Delete ...
+ Delete DeleteOptions = "Delete"
+ // Detach ...
+ Detach DeleteOptions = "Detach"
)
// PossibleDeleteOptionsValues returns an array of possible values for the DeleteOptions const type.
func PossibleDeleteOptionsValues() []DeleteOptions {
- return []DeleteOptions{DeleteOptionsDelete, DeleteOptionsDetach}
+ return []DeleteOptions{Delete, Detach}
}
// DiffDiskOptions enumerates the values for diff disk options.
type DiffDiskOptions string
const (
- // DiffDiskOptionsLocal ...
- DiffDiskOptionsLocal DiffDiskOptions = "Local"
+ // Local ...
+ Local DiffDiskOptions = "Local"
)
// PossibleDiffDiskOptionsValues returns an array of possible values for the DiffDiskOptions const type.
func PossibleDiffDiskOptionsValues() []DiffDiskOptions {
- return []DiffDiskOptions{DiffDiskOptionsLocal}
+ return []DiffDiskOptions{Local}
}
// DiffDiskPlacement enumerates the values for diff disk placement.
type DiffDiskPlacement string
const (
- // DiffDiskPlacementCacheDisk ...
- DiffDiskPlacementCacheDisk DiffDiskPlacement = "CacheDisk"
- // DiffDiskPlacementResourceDisk ...
- DiffDiskPlacementResourceDisk DiffDiskPlacement = "ResourceDisk"
+ // CacheDisk ...
+ CacheDisk DiffDiskPlacement = "CacheDisk"
+ // ResourceDisk ...
+ ResourceDisk DiffDiskPlacement = "ResourceDisk"
)
// PossibleDiffDiskPlacementValues returns an array of possible values for the DiffDiskPlacement const type.
func PossibleDiffDiskPlacementValues() []DiffDiskPlacement {
- return []DiffDiskPlacement{DiffDiskPlacementCacheDisk, DiffDiskPlacementResourceDisk}
+ return []DiffDiskPlacement{CacheDisk, ResourceDisk}
}
// DiskCreateOption enumerates the values for disk create option.
type DiskCreateOption string
const (
- // DiskCreateOptionAttach Disk will be attached to a VM.
- DiskCreateOptionAttach DiskCreateOption = "Attach"
- // DiskCreateOptionCopy Create a new disk or snapshot by copying from a disk or snapshot specified by the
- // given sourceResourceId.
- DiskCreateOptionCopy DiskCreateOption = "Copy"
- // DiskCreateOptionCopyStart Create a new disk by using a deep copy process, where the resource creation is
- // considered complete only after all data has been copied from the source.
- DiskCreateOptionCopyStart DiskCreateOption = "CopyStart"
- // DiskCreateOptionEmpty Create an empty data disk of a size given by diskSizeGB.
- DiskCreateOptionEmpty DiskCreateOption = "Empty"
- // DiskCreateOptionFromImage Create a new disk from a platform image specified by the given imageReference
- // or galleryImageReference.
- DiskCreateOptionFromImage DiskCreateOption = "FromImage"
- // DiskCreateOptionImport Create a disk by importing from a blob specified by a sourceUri in a storage
- // account specified by storageAccountId.
- DiskCreateOptionImport DiskCreateOption = "Import"
- // DiskCreateOptionRestore Create a new disk by copying from a backup recovery point.
- DiskCreateOptionRestore DiskCreateOption = "Restore"
- // DiskCreateOptionUpload Create a new disk by obtaining a write token and using it to directly upload the
- // contents of the disk.
- DiskCreateOptionUpload DiskCreateOption = "Upload"
+ // Attach Disk will be attached to a VM.
+ Attach DiskCreateOption = "Attach"
+ // Copy Create a new disk or snapshot by copying from a disk or snapshot specified by the given
+ // sourceResourceId.
+ Copy DiskCreateOption = "Copy"
+ // CopyStart Create a new disk by using a deep copy process, where the resource creation is considered
+ // complete only after all data has been copied from the source.
+ CopyStart DiskCreateOption = "CopyStart"
+ // Empty Create an empty data disk of a size given by diskSizeGB.
+ Empty DiskCreateOption = "Empty"
+ // FromImage Create a new disk from a platform image specified by the given imageReference or
+ // galleryImageReference.
+ FromImage DiskCreateOption = "FromImage"
+ // Import Create a disk by importing from a blob specified by a sourceUri in a storage account specified by
+ // storageAccountId.
+ Import DiskCreateOption = "Import"
+ // ImportSecure Similar to Import create option. Create a new Trusted Launch VM or Confidential VM
+ // supported disk by importing additional blob for VM guest state specified by securityDataUri in storage
+ // account specified by storageAccountId
+ ImportSecure DiskCreateOption = "ImportSecure"
+ // Restore Create a new disk by copying from a backup recovery point.
+ Restore DiskCreateOption = "Restore"
+ // Upload Create a new disk by obtaining a write token and using it to directly upload the contents of the
+ // disk.
+ Upload DiskCreateOption = "Upload"
+ // UploadPreparedSecure Similar to Upload create option. Create a new Trusted Launch VM or Confidential VM
+ // supported disk and upload using write token in both disk and VM guest state
+ UploadPreparedSecure DiskCreateOption = "UploadPreparedSecure"
)
// PossibleDiskCreateOptionValues returns an array of possible values for the DiskCreateOption const type.
func PossibleDiskCreateOptionValues() []DiskCreateOption {
- return []DiskCreateOption{DiskCreateOptionAttach, DiskCreateOptionCopy, DiskCreateOptionCopyStart, DiskCreateOptionEmpty, DiskCreateOptionFromImage, DiskCreateOptionImport, DiskCreateOptionRestore, DiskCreateOptionUpload}
+ return []DiskCreateOption{Attach, Copy, CopyStart, Empty, FromImage, Import, ImportSecure, Restore, Upload, UploadPreparedSecure}
}
// DiskCreateOptionTypes enumerates the values for disk create option types.
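Reviewer note: the create options above map directly onto CreationData on a managed disk; the two new secure variants (ImportSecure, UploadPreparedSecure) extend Import and Upload for Trusted Launch and Confidential VM disks. A minimal sketch of the common Empty case, with placeholder location and size:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Empty needs only a size; Import would instead point CreationData at
	// a source blob via its source-URI field.
	disk := compute.Disk{
		Location: to.StringPtr("eastus"),
		DiskProperties: &compute.DiskProperties{
			CreationData: &compute.CreationData{CreateOption: compute.Empty},
			DiskSizeGB:   to.Int32Ptr(128),
		},
	}
	fmt.Println(*disk.DiskProperties.DiskSizeGB)
}
```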
@@ -276,13 +347,13 @@ func PossibleDiskDeleteOptionTypesValues() []DiskDeleteOptionTypes {
type DiskDetachOptionTypes string
const (
- // DiskDetachOptionTypesForceDetach ...
- DiskDetachOptionTypesForceDetach DiskDetachOptionTypes = "ForceDetach"
+ // ForceDetach ...
+ ForceDetach DiskDetachOptionTypes = "ForceDetach"
)
// PossibleDiskDetachOptionTypesValues returns an array of possible values for the DiskDetachOptionTypes const type.
func PossibleDiskDetachOptionTypesValues() []DiskDetachOptionTypes {
- return []DiskDetachOptionTypes{DiskDetachOptionTypesForceDetach}
+ return []DiskDetachOptionTypes{ForceDetach}
}
// DiskEncryptionSetIdentityType enumerates the values for disk encryption set identity type.
@@ -304,90 +375,101 @@ func PossibleDiskEncryptionSetIdentityTypeValues() []DiskEncryptionSetIdentityTy
type DiskEncryptionSetType string
const (
- // DiskEncryptionSetTypeEncryptionAtRestWithCustomerKey Resource using diskEncryptionSet would be encrypted
- // at rest with Customer managed key that can be changed and revoked by a customer.
- DiskEncryptionSetTypeEncryptionAtRestWithCustomerKey DiskEncryptionSetType = "EncryptionAtRestWithCustomerKey"
- // DiskEncryptionSetTypeEncryptionAtRestWithPlatformAndCustomerKeys Resource using diskEncryptionSet would
- // be encrypted at rest with two layers of encryption. One of the keys is Customer managed and the other
- // key is Platform managed.
- DiskEncryptionSetTypeEncryptionAtRestWithPlatformAndCustomerKeys DiskEncryptionSetType = "EncryptionAtRestWithPlatformAndCustomerKeys"
+ // ConfidentialVMEncryptedWithCustomerKey Confidential VM supported disk and VM guest state would be
+ // encrypted with customer managed key.
+ ConfidentialVMEncryptedWithCustomerKey DiskEncryptionSetType = "ConfidentialVmEncryptedWithCustomerKey"
+ // EncryptionAtRestWithCustomerKey Resource using diskEncryptionSet would be encrypted at rest with
+ // Customer managed key that can be changed and revoked by a customer.
+ EncryptionAtRestWithCustomerKey DiskEncryptionSetType = "EncryptionAtRestWithCustomerKey"
+ // EncryptionAtRestWithPlatformAndCustomerKeys Resource using diskEncryptionSet would be encrypted at rest
+ // with two layers of encryption. One of the keys is Customer managed and the other key is Platform
+ // managed.
+ EncryptionAtRestWithPlatformAndCustomerKeys DiskEncryptionSetType = "EncryptionAtRestWithPlatformAndCustomerKeys"
)
// PossibleDiskEncryptionSetTypeValues returns an array of possible values for the DiskEncryptionSetType const type.
func PossibleDiskEncryptionSetTypeValues() []DiskEncryptionSetType {
- return []DiskEncryptionSetType{DiskEncryptionSetTypeEncryptionAtRestWithCustomerKey, DiskEncryptionSetTypeEncryptionAtRestWithPlatformAndCustomerKeys}
+ return []DiskEncryptionSetType{ConfidentialVMEncryptedWithCustomerKey, EncryptionAtRestWithCustomerKey, EncryptionAtRestWithPlatformAndCustomerKeys}
}
// DiskSecurityTypes enumerates the values for disk security types.
type DiskSecurityTypes string
const (
- // DiskSecurityTypesTrustedLaunch Trusted Launch provides security features such as secure boot and virtual
- // Trusted Platform Module (vTPM)
- DiskSecurityTypesTrustedLaunch DiskSecurityTypes = "TrustedLaunch"
+ // ConfidentialVMDiskEncryptedWithCustomerKey Indicates Confidential VM disk with both OS disk and VM guest
+ // state encrypted with a customer managed key
+ ConfidentialVMDiskEncryptedWithCustomerKey DiskSecurityTypes = "ConfidentialVM_DiskEncryptedWithCustomerKey"
+ // ConfidentialVMDiskEncryptedWithPlatformKey Indicates Confidential VM disk with both OS disk and VM guest
+ // state encrypted with a platform managed key
+ ConfidentialVMDiskEncryptedWithPlatformKey DiskSecurityTypes = "ConfidentialVM_DiskEncryptedWithPlatformKey"
+ // ConfidentialVMVMGuestStateOnlyEncryptedWithPlatformKey Indicates Confidential VM disk with only VM guest
+ // state encrypted
+ ConfidentialVMVMGuestStateOnlyEncryptedWithPlatformKey DiskSecurityTypes = "ConfidentialVM_VMGuestStateOnlyEncryptedWithPlatformKey"
+ // TrustedLaunch Trusted Launch provides security features such as secure boot and virtual Trusted Platform
+ // Module (vTPM)
+ TrustedLaunch DiskSecurityTypes = "TrustedLaunch"
)
// PossibleDiskSecurityTypesValues returns an array of possible values for the DiskSecurityTypes const type.
func PossibleDiskSecurityTypesValues() []DiskSecurityTypes {
- return []DiskSecurityTypes{DiskSecurityTypesTrustedLaunch}
+ return []DiskSecurityTypes{ConfidentialVMDiskEncryptedWithCustomerKey, ConfidentialVMDiskEncryptedWithPlatformKey, ConfidentialVMVMGuestStateOnlyEncryptedWithPlatformKey, TrustedLaunch}
}
// DiskState enumerates the values for disk state.
type DiskState string
const (
- // DiskStateActiveSAS The disk currently has an Active SAS Uri associated with it.
- DiskStateActiveSAS DiskState = "ActiveSAS"
- // DiskStateActiveSASFrozen The disk is attached to a VM in hibernated state and has an active SAS URI
- // associated with it.
- DiskStateActiveSASFrozen DiskState = "ActiveSASFrozen"
- // DiskStateActiveUpload A disk is created for upload and a write token has been issued for uploading to
- // it.
- DiskStateActiveUpload DiskState = "ActiveUpload"
- // DiskStateAttached The disk is currently attached to a running VM.
- DiskStateAttached DiskState = "Attached"
- // DiskStateFrozen The disk is attached to a VM which is in hibernated state.
- DiskStateFrozen DiskState = "Frozen"
- // DiskStateReadyToUpload A disk is ready to be created by upload by requesting a write token.
- DiskStateReadyToUpload DiskState = "ReadyToUpload"
- // DiskStateReserved The disk is attached to a stopped-deallocated VM.
- DiskStateReserved DiskState = "Reserved"
- // DiskStateUnattached The disk is not being used and can be attached to a VM.
- DiskStateUnattached DiskState = "Unattached"
+ // ActiveSAS The disk currently has an Active SAS Uri associated with it.
+ ActiveSAS DiskState = "ActiveSAS"
+ // ActiveSASFrozen The disk is attached to a VM in hibernated state and has an active SAS URI associated
+ // with it.
+ ActiveSASFrozen DiskState = "ActiveSASFrozen"
+ // ActiveUpload A disk is created for upload and a write token has been issued for uploading to it.
+ ActiveUpload DiskState = "ActiveUpload"
+ // Attached The disk is currently attached to a running VM.
+ Attached DiskState = "Attached"
+ // Frozen The disk is attached to a VM which is in hibernated state.
+ Frozen DiskState = "Frozen"
+ // ReadyToUpload A disk is ready to be created by upload by requesting a write token.
+ ReadyToUpload DiskState = "ReadyToUpload"
+ // Reserved The disk is attached to a stopped-deallocated VM.
+ Reserved DiskState = "Reserved"
+ // Unattached The disk is not being used and can be attached to a VM.
+ Unattached DiskState = "Unattached"
)
// PossibleDiskStateValues returns an array of possible values for the DiskState const type.
func PossibleDiskStateValues() []DiskState {
- return []DiskState{DiskStateActiveSAS, DiskStateActiveSASFrozen, DiskStateActiveUpload, DiskStateAttached, DiskStateFrozen, DiskStateReadyToUpload, DiskStateReserved, DiskStateUnattached}
+ return []DiskState{ActiveSAS, ActiveSASFrozen, ActiveUpload, Attached, Frozen, ReadyToUpload, Reserved, Unattached}
}
// DiskStorageAccountTypes enumerates the values for disk storage account types.
type DiskStorageAccountTypes string
const (
- // DiskStorageAccountTypesPremiumLRS Premium SSD locally redundant storage. Best for production and
- // performance sensitive workloads.
- DiskStorageAccountTypesPremiumLRS DiskStorageAccountTypes = "Premium_LRS"
- // DiskStorageAccountTypesPremiumZRS Premium SSD zone redundant storage. Best for the production workloads
- // that need storage resiliency against zone failures.
- DiskStorageAccountTypesPremiumZRS DiskStorageAccountTypes = "Premium_ZRS"
- // DiskStorageAccountTypesStandardLRS Standard HDD locally redundant storage. Best for backup,
- // non-critical, and infrequent access.
- DiskStorageAccountTypesStandardLRS DiskStorageAccountTypes = "Standard_LRS"
- // DiskStorageAccountTypesStandardSSDLRS Standard SSD locally redundant storage. Best for web servers,
- // lightly used enterprise applications and dev/test.
- DiskStorageAccountTypesStandardSSDLRS DiskStorageAccountTypes = "StandardSSD_LRS"
- // DiskStorageAccountTypesStandardSSDZRS Standard SSD zone redundant storage. Best for web servers, lightly
- // used enterprise applications and dev/test that need storage resiliency against zone failures.
- DiskStorageAccountTypesStandardSSDZRS DiskStorageAccountTypes = "StandardSSD_ZRS"
- // DiskStorageAccountTypesUltraSSDLRS Ultra SSD locally redundant storage. Best for IO-intensive workloads
- // such as SAP HANA, top tier databases (for example, SQL, Oracle), and other transaction-heavy workloads.
- DiskStorageAccountTypesUltraSSDLRS DiskStorageAccountTypes = "UltraSSD_LRS"
+ // PremiumLRS Premium SSD locally redundant storage. Best for production and performance sensitive
+ // workloads.
+ PremiumLRS DiskStorageAccountTypes = "Premium_LRS"
+ // PremiumZRS Premium SSD zone redundant storage. Best for the production workloads that need storage
+ // resiliency against zone failures.
+ PremiumZRS DiskStorageAccountTypes = "Premium_ZRS"
+ // StandardLRS Standard HDD locally redundant storage. Best for backup, non-critical, and infrequent
+ // access.
+ StandardLRS DiskStorageAccountTypes = "Standard_LRS"
+ // StandardSSDLRS Standard SSD locally redundant storage. Best for web servers, lightly used enterprise
+ // applications and dev/test.
+ StandardSSDLRS DiskStorageAccountTypes = "StandardSSD_LRS"
+ // StandardSSDZRS Standard SSD zone redundant storage. Best for web servers, lightly used enterprise
+ // applications and dev/test that need storage resiliency against zone failures.
+ StandardSSDZRS DiskStorageAccountTypes = "StandardSSD_ZRS"
+ // UltraSSDLRS Ultra SSD locally redundant storage. Best for IO-intensive workloads such as SAP HANA, top
+ // tier databases (for example, SQL, Oracle), and other transaction-heavy workloads.
+ UltraSSDLRS DiskStorageAccountTypes = "UltraSSD_LRS"
)
// PossibleDiskStorageAccountTypesValues returns an array of possible values for the DiskStorageAccountTypes const type.
func PossibleDiskStorageAccountTypesValues() []DiskStorageAccountTypes {
- return []DiskStorageAccountTypes{DiskStorageAccountTypesPremiumLRS, DiskStorageAccountTypesPremiumZRS, DiskStorageAccountTypesStandardLRS, DiskStorageAccountTypesStandardSSDLRS, DiskStorageAccountTypesStandardSSDZRS, DiskStorageAccountTypesUltraSSDLRS}
+ return []DiskStorageAccountTypes{PremiumLRS, PremiumZRS, StandardLRS, StandardSSDLRS, StandardSSDZRS, UltraSSDLRS}
}
// EncryptionType enumerates the values for encryption type.
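Reviewer note: the storage account types above surface as the disk SKU, so choosing a tier is just a matter of picking the constant; the doc comments summarize when each fits (e.g. UltraSSDLRS for IO-heavy workloads, the ZRS tiers for zone resiliency). A sketch:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
)

func main() {
	// Premium SSD, locally redundant: the usual production default.
	sku := compute.DiskSku{Name: compute.PremiumLRS}
	fmt.Println(sku.Name, compute.PossibleDiskStorageAccountTypesValues())
}
```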
@@ -440,41 +522,41 @@ func PossibleExecutionStateValues() []ExecutionState {
type ExpandTypesForGetCapacityReservationGroups string
const (
- // ExpandTypesForGetCapacityReservationGroupsVirtualMachineScaleSetVMsref ...
- ExpandTypesForGetCapacityReservationGroupsVirtualMachineScaleSetVMsref ExpandTypesForGetCapacityReservationGroups = "virtualMachineScaleSetVMs/$ref"
- // ExpandTypesForGetCapacityReservationGroupsVirtualMachinesref ...
- ExpandTypesForGetCapacityReservationGroupsVirtualMachinesref ExpandTypesForGetCapacityReservationGroups = "virtualMachines/$ref"
+ // VirtualMachineScaleSetVMsref ...
+ VirtualMachineScaleSetVMsref ExpandTypesForGetCapacityReservationGroups = "virtualMachineScaleSetVMs/$ref"
+ // VirtualMachinesref ...
+ VirtualMachinesref ExpandTypesForGetCapacityReservationGroups = "virtualMachines/$ref"
)
// PossibleExpandTypesForGetCapacityReservationGroupsValues returns an array of possible values for the ExpandTypesForGetCapacityReservationGroups const type.
func PossibleExpandTypesForGetCapacityReservationGroupsValues() []ExpandTypesForGetCapacityReservationGroups {
- return []ExpandTypesForGetCapacityReservationGroups{ExpandTypesForGetCapacityReservationGroupsVirtualMachineScaleSetVMsref, ExpandTypesForGetCapacityReservationGroupsVirtualMachinesref}
+ return []ExpandTypesForGetCapacityReservationGroups{VirtualMachineScaleSetVMsref, VirtualMachinesref}
}
// ExpandTypesForGetVMScaleSets enumerates the values for expand types for get vm scale sets.
type ExpandTypesForGetVMScaleSets string
const (
- // ExpandTypesForGetVMScaleSetsUserData ...
- ExpandTypesForGetVMScaleSetsUserData ExpandTypesForGetVMScaleSets = "userData"
+ // UserData ...
+ UserData ExpandTypesForGetVMScaleSets = "userData"
)
// PossibleExpandTypesForGetVMScaleSetsValues returns an array of possible values for the ExpandTypesForGetVMScaleSets const type.
func PossibleExpandTypesForGetVMScaleSetsValues() []ExpandTypesForGetVMScaleSets {
- return []ExpandTypesForGetVMScaleSets{ExpandTypesForGetVMScaleSetsUserData}
+ return []ExpandTypesForGetVMScaleSets{UserData}
}
// ExtendedLocationType enumerates the values for extended location type.
type ExtendedLocationType string
const (
- // ExtendedLocationTypeEdgeZone ...
- ExtendedLocationTypeEdgeZone ExtendedLocationType = "EdgeZone"
+ // EdgeZone ...
+ EdgeZone ExtendedLocationType = "EdgeZone"
)
// PossibleExtendedLocationTypeValues returns an array of possible values for the ExtendedLocationType const type.
func PossibleExtendedLocationTypeValues() []ExtendedLocationType {
- return []ExtendedLocationType{ExtendedLocationTypeEdgeZone}
+ return []ExtendedLocationType{EdgeZone}
}
// ExtendedLocationTypes enumerates the values for extended location types.
@@ -490,19 +572,47 @@ func PossibleExtendedLocationTypesValues() []ExtendedLocationTypes {
return []ExtendedLocationTypes{ExtendedLocationTypesEdgeZone}
}
+// GalleryExpandParams enumerates the values for gallery expand params.
+type GalleryExpandParams string
+
+const (
+ // SharingProfileGroups ...
+ SharingProfileGroups GalleryExpandParams = "SharingProfile/Groups"
+)
+
+// PossibleGalleryExpandParamsValues returns an array of possible values for the GalleryExpandParams const type.
+func PossibleGalleryExpandParamsValues() []GalleryExpandParams {
+ return []GalleryExpandParams{SharingProfileGroups}
+}
+
+// GalleryExtendedLocationType enumerates the values for gallery extended location type.
+type GalleryExtendedLocationType string
+
+const (
+ // GalleryExtendedLocationTypeEdgeZone ...
+ GalleryExtendedLocationTypeEdgeZone GalleryExtendedLocationType = "EdgeZone"
+ // GalleryExtendedLocationTypeUnknown ...
+ GalleryExtendedLocationTypeUnknown GalleryExtendedLocationType = "Unknown"
+)
+
+// PossibleGalleryExtendedLocationTypeValues returns an array of possible values for the GalleryExtendedLocationType const type.
+func PossibleGalleryExtendedLocationTypeValues() []GalleryExtendedLocationType {
+ return []GalleryExtendedLocationType{GalleryExtendedLocationTypeEdgeZone, GalleryExtendedLocationTypeUnknown}
+}
+
// GallerySharingPermissionTypes enumerates the values for gallery sharing permission types.
type GallerySharingPermissionTypes string
const (
- // GallerySharingPermissionTypesGroups ...
- GallerySharingPermissionTypesGroups GallerySharingPermissionTypes = "Groups"
- // GallerySharingPermissionTypesPrivate ...
- GallerySharingPermissionTypesPrivate GallerySharingPermissionTypes = "Private"
+ // Groups ...
+ Groups GallerySharingPermissionTypes = "Groups"
+ // Private ...
+ Private GallerySharingPermissionTypes = "Private"
)
// PossibleGallerySharingPermissionTypesValues returns an array of possible values for the GallerySharingPermissionTypes const type.
func PossibleGallerySharingPermissionTypesValues() []GallerySharingPermissionTypes {
- return []GallerySharingPermissionTypes{GallerySharingPermissionTypesGroups, GallerySharingPermissionTypesPrivate}
+ return []GallerySharingPermissionTypes{Groups, Private}
}
// HostCaching enumerates the values for host caching.
@@ -526,15 +636,15 @@ func PossibleHostCachingValues() []HostCaching {
type HyperVGeneration string
const (
- // HyperVGenerationV1 ...
- HyperVGenerationV1 HyperVGeneration = "V1"
- // HyperVGenerationV2 ...
- HyperVGenerationV2 HyperVGeneration = "V2"
+ // V1 ...
+ V1 HyperVGeneration = "V1"
+ // V2 ...
+ V2 HyperVGeneration = "V2"
)
// PossibleHyperVGenerationValues returns an array of possible values for the HyperVGeneration const type.
func PossibleHyperVGenerationValues() []HyperVGeneration {
- return []HyperVGeneration{HyperVGenerationV1, HyperVGenerationV2}
+ return []HyperVGeneration{V1, V2}
}
// HyperVGenerationType enumerates the values for hyper v generation type.
@@ -586,34 +696,34 @@ func PossibleInstanceViewTypesValues() []InstanceViewTypes {
type IntervalInMins string
const (
- // IntervalInMinsFiveMins ...
- IntervalInMinsFiveMins IntervalInMins = "FiveMins"
- // IntervalInMinsSixtyMins ...
- IntervalInMinsSixtyMins IntervalInMins = "SixtyMins"
- // IntervalInMinsThirtyMins ...
- IntervalInMinsThirtyMins IntervalInMins = "ThirtyMins"
- // IntervalInMinsThreeMins ...
- IntervalInMinsThreeMins IntervalInMins = "ThreeMins"
+ // FiveMins ...
+ FiveMins IntervalInMins = "FiveMins"
+ // SixtyMins ...
+ SixtyMins IntervalInMins = "SixtyMins"
+ // ThirtyMins ...
+ ThirtyMins IntervalInMins = "ThirtyMins"
+ // ThreeMins ...
+ ThreeMins IntervalInMins = "ThreeMins"
)
// PossibleIntervalInMinsValues returns an array of possible values for the IntervalInMins const type.
func PossibleIntervalInMinsValues() []IntervalInMins {
- return []IntervalInMins{IntervalInMinsFiveMins, IntervalInMinsSixtyMins, IntervalInMinsThirtyMins, IntervalInMinsThreeMins}
+ return []IntervalInMins{FiveMins, SixtyMins, ThirtyMins, ThreeMins}
}
// IPVersion enumerates the values for ip version.
type IPVersion string
const (
- // IPVersionIPv4 ...
- IPVersionIPv4 IPVersion = "IPv4"
- // IPVersionIPv6 ...
- IPVersionIPv6 IPVersion = "IPv6"
+ // IPv4 ...
+ IPv4 IPVersion = "IPv4"
+ // IPv6 ...
+ IPv6 IPVersion = "IPv6"
)
// PossibleIPVersionValues returns an array of possible values for the IPVersion const type.
func PossibleIPVersionValues() []IPVersion {
- return []IPVersion{IPVersionIPv4, IPVersionIPv6}
+ return []IPVersion{IPv4, IPv6}
}
// IPVersions enumerates the values for ip versions.
@@ -635,15 +745,35 @@ func PossibleIPVersionsValues() []IPVersions {
type LinuxPatchAssessmentMode string
const (
- // LinuxPatchAssessmentModeAutomaticByPlatform ...
- LinuxPatchAssessmentModeAutomaticByPlatform LinuxPatchAssessmentMode = "AutomaticByPlatform"
- // LinuxPatchAssessmentModeImageDefault ...
- LinuxPatchAssessmentModeImageDefault LinuxPatchAssessmentMode = "ImageDefault"
+ // AutomaticByPlatform ...
+ AutomaticByPlatform LinuxPatchAssessmentMode = "AutomaticByPlatform"
+ // ImageDefault ...
+ ImageDefault LinuxPatchAssessmentMode = "ImageDefault"
)
// PossibleLinuxPatchAssessmentModeValues returns an array of possible values for the LinuxPatchAssessmentMode const type.
func PossibleLinuxPatchAssessmentModeValues() []LinuxPatchAssessmentMode {
- return []LinuxPatchAssessmentMode{LinuxPatchAssessmentModeAutomaticByPlatform, LinuxPatchAssessmentModeImageDefault}
+ return []LinuxPatchAssessmentMode{AutomaticByPlatform, ImageDefault}
+}
+
+// LinuxVMGuestPatchAutomaticByPlatformRebootSetting enumerates the values for linux vm guest patch automatic
+// by platform reboot setting.
+type LinuxVMGuestPatchAutomaticByPlatformRebootSetting string
+
+const (
+ // LinuxVMGuestPatchAutomaticByPlatformRebootSettingAlways ...
+ LinuxVMGuestPatchAutomaticByPlatformRebootSettingAlways LinuxVMGuestPatchAutomaticByPlatformRebootSetting = "Always"
+ // LinuxVMGuestPatchAutomaticByPlatformRebootSettingIfRequired ...
+ LinuxVMGuestPatchAutomaticByPlatformRebootSettingIfRequired LinuxVMGuestPatchAutomaticByPlatformRebootSetting = "IfRequired"
+ // LinuxVMGuestPatchAutomaticByPlatformRebootSettingNever ...
+ LinuxVMGuestPatchAutomaticByPlatformRebootSettingNever LinuxVMGuestPatchAutomaticByPlatformRebootSetting = "Never"
+ // LinuxVMGuestPatchAutomaticByPlatformRebootSettingUnknown ...
+ LinuxVMGuestPatchAutomaticByPlatformRebootSettingUnknown LinuxVMGuestPatchAutomaticByPlatformRebootSetting = "Unknown"
+)
+
+// PossibleLinuxVMGuestPatchAutomaticByPlatformRebootSettingValues returns an array of possible values for the LinuxVMGuestPatchAutomaticByPlatformRebootSetting const type.
+func PossibleLinuxVMGuestPatchAutomaticByPlatformRebootSettingValues() []LinuxVMGuestPatchAutomaticByPlatformRebootSetting {
+ return []LinuxVMGuestPatchAutomaticByPlatformRebootSetting{LinuxVMGuestPatchAutomaticByPlatformRebootSettingAlways, LinuxVMGuestPatchAutomaticByPlatformRebootSettingIfRequired, LinuxVMGuestPatchAutomaticByPlatformRebootSettingNever, LinuxVMGuestPatchAutomaticByPlatformRebootSettingUnknown}
}
// LinuxVMGuestPatchMode enumerates the values for linux vm guest patch mode.
@@ -684,61 +814,60 @@ func PossibleMaintenanceOperationResultCodeTypesValues() []MaintenanceOperationR
type NetworkAccessPolicy string
const (
- // NetworkAccessPolicyAllowAll The disk can be exported or uploaded to from any network.
- NetworkAccessPolicyAllowAll NetworkAccessPolicy = "AllowAll"
- // NetworkAccessPolicyAllowPrivate The disk can be exported or uploaded to using a DiskAccess resource's
- // private endpoints.
- NetworkAccessPolicyAllowPrivate NetworkAccessPolicy = "AllowPrivate"
- // NetworkAccessPolicyDenyAll The disk cannot be exported.
- NetworkAccessPolicyDenyAll NetworkAccessPolicy = "DenyAll"
+ // AllowAll The disk can be exported or uploaded to from any network.
+ AllowAll NetworkAccessPolicy = "AllowAll"
+ // AllowPrivate The disk can be exported or uploaded to using a DiskAccess resource's private endpoints.
+ AllowPrivate NetworkAccessPolicy = "AllowPrivate"
+ // DenyAll The disk cannot be exported.
+ DenyAll NetworkAccessPolicy = "DenyAll"
)
// PossibleNetworkAccessPolicyValues returns an array of possible values for the NetworkAccessPolicy const type.
func PossibleNetworkAccessPolicyValues() []NetworkAccessPolicy {
- return []NetworkAccessPolicy{NetworkAccessPolicyAllowAll, NetworkAccessPolicyAllowPrivate, NetworkAccessPolicyDenyAll}
+ return []NetworkAccessPolicy{AllowAll, AllowPrivate, DenyAll}
}
// NetworkAPIVersion enumerates the values for network api version.
type NetworkAPIVersion string
const (
- // NetworkAPIVersionTwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne ...
- NetworkAPIVersionTwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne NetworkAPIVersion = "2020-11-01"
+ // TwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne ...
+ TwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne NetworkAPIVersion = "2020-11-01"
)
// PossibleNetworkAPIVersionValues returns an array of possible values for the NetworkAPIVersion const type.
func PossibleNetworkAPIVersionValues() []NetworkAPIVersion {
- return []NetworkAPIVersion{NetworkAPIVersionTwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne}
+ return []NetworkAPIVersion{TwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne}
}
// OperatingSystemStateTypes enumerates the values for operating system state types.
type OperatingSystemStateTypes string
const (
- // OperatingSystemStateTypesGeneralized Generalized image. Needs to be provisioned during deployment time.
- OperatingSystemStateTypesGeneralized OperatingSystemStateTypes = "Generalized"
- // OperatingSystemStateTypesSpecialized Specialized image. Contains already provisioned OS Disk.
- OperatingSystemStateTypesSpecialized OperatingSystemStateTypes = "Specialized"
+ // Generalized Generalized image. Needs to be provisioned during deployment time.
+ Generalized OperatingSystemStateTypes = "Generalized"
+ // Specialized Specialized image. Contains already provisioned OS Disk.
+ Specialized OperatingSystemStateTypes = "Specialized"
)
// PossibleOperatingSystemStateTypesValues returns an array of possible values for the OperatingSystemStateTypes const type.
func PossibleOperatingSystemStateTypesValues() []OperatingSystemStateTypes {
- return []OperatingSystemStateTypes{OperatingSystemStateTypesGeneralized, OperatingSystemStateTypesSpecialized}
+ return []OperatingSystemStateTypes{Generalized, Specialized}
}
// OperatingSystemType enumerates the values for operating system type.
type OperatingSystemType string
const (
- // OperatingSystemTypeLinux ...
- OperatingSystemTypeLinux OperatingSystemType = "Linux"
- // OperatingSystemTypeWindows ...
- OperatingSystemTypeWindows OperatingSystemType = "Windows"
+ // Linux ...
+ Linux OperatingSystemType = "Linux"
+ // Windows ...
+ Windows OperatingSystemType = "Windows"
)
// PossibleOperatingSystemTypeValues returns an array of possible values for the OperatingSystemType const type.
func PossibleOperatingSystemTypeValues() []OperatingSystemType {
- return []OperatingSystemType{OperatingSystemTypeLinux, OperatingSystemTypeWindows}
+ return []OperatingSystemType{Linux, Windows}
}
// OperatingSystemTypes enumerates the values for operating system types.
@@ -760,73 +889,73 @@ func PossibleOperatingSystemTypesValues() []OperatingSystemTypes {
type OrchestrationMode string
const (
- // OrchestrationModeFlexible ...
- OrchestrationModeFlexible OrchestrationMode = "Flexible"
- // OrchestrationModeUniform ...
- OrchestrationModeUniform OrchestrationMode = "Uniform"
+ // Flexible ...
+ Flexible OrchestrationMode = "Flexible"
+ // Uniform ...
+ Uniform OrchestrationMode = "Uniform"
)
// PossibleOrchestrationModeValues returns an array of possible values for the OrchestrationMode const type.
func PossibleOrchestrationModeValues() []OrchestrationMode {
- return []OrchestrationMode{OrchestrationModeFlexible, OrchestrationModeUniform}
+ return []OrchestrationMode{Flexible, Uniform}
}
// OrchestrationServiceNames enumerates the values for orchestration service names.
type OrchestrationServiceNames string
const (
- // OrchestrationServiceNamesAutomaticRepairs ...
- OrchestrationServiceNamesAutomaticRepairs OrchestrationServiceNames = "AutomaticRepairs"
+ // AutomaticRepairs ...
+ AutomaticRepairs OrchestrationServiceNames = "AutomaticRepairs"
)
// PossibleOrchestrationServiceNamesValues returns an array of possible values for the OrchestrationServiceNames const type.
func PossibleOrchestrationServiceNamesValues() []OrchestrationServiceNames {
- return []OrchestrationServiceNames{OrchestrationServiceNamesAutomaticRepairs}
+ return []OrchestrationServiceNames{AutomaticRepairs}
}
// OrchestrationServiceState enumerates the values for orchestration service state.
type OrchestrationServiceState string
const (
- // OrchestrationServiceStateNotRunning ...
- OrchestrationServiceStateNotRunning OrchestrationServiceState = "NotRunning"
- // OrchestrationServiceStateRunning ...
- OrchestrationServiceStateRunning OrchestrationServiceState = "Running"
- // OrchestrationServiceStateSuspended ...
- OrchestrationServiceStateSuspended OrchestrationServiceState = "Suspended"
+ // NotRunning ...
+ NotRunning OrchestrationServiceState = "NotRunning"
+ // Running ...
+ Running OrchestrationServiceState = "Running"
+ // Suspended ...
+ Suspended OrchestrationServiceState = "Suspended"
)
// PossibleOrchestrationServiceStateValues returns an array of possible values for the OrchestrationServiceState const type.
func PossibleOrchestrationServiceStateValues() []OrchestrationServiceState {
- return []OrchestrationServiceState{OrchestrationServiceStateNotRunning, OrchestrationServiceStateRunning, OrchestrationServiceStateSuspended}
+ return []OrchestrationServiceState{NotRunning, Running, Suspended}
}
// OrchestrationServiceStateAction enumerates the values for orchestration service state action.
type OrchestrationServiceStateAction string
const (
- // OrchestrationServiceStateActionResume ...
- OrchestrationServiceStateActionResume OrchestrationServiceStateAction = "Resume"
- // OrchestrationServiceStateActionSuspend ...
- OrchestrationServiceStateActionSuspend OrchestrationServiceStateAction = "Suspend"
+ // Resume ...
+ Resume OrchestrationServiceStateAction = "Resume"
+ // Suspend ...
+ Suspend OrchestrationServiceStateAction = "Suspend"
)
// PossibleOrchestrationServiceStateActionValues returns an array of possible values for the OrchestrationServiceStateAction const type.
func PossibleOrchestrationServiceStateActionValues() []OrchestrationServiceStateAction {
- return []OrchestrationServiceStateAction{OrchestrationServiceStateActionResume, OrchestrationServiceStateActionSuspend}
+ return []OrchestrationServiceStateAction{Resume, Suspend}
}
// PassNames enumerates the values for pass names.
type PassNames string
const (
- // PassNamesOobeSystem ...
- PassNamesOobeSystem PassNames = "OobeSystem"
+ // OobeSystem ...
+ OobeSystem PassNames = "OobeSystem"
)
// PossiblePassNamesValues returns an array of possible values for the PassNames const type.
func PossiblePassNamesValues() []PassNames {
- return []PassNames{PassNamesOobeSystem}
+ return []PassNames{OobeSystem}
}
// PatchAssessmentState enumerates the values for patch assessment state.
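Reviewer note: OrchestrationMode keeps its two values but loses the prefix; the mode is chosen once at scale-set creation and cannot be changed afterwards. A sketch, assuming VirtualMachineScaleSetProperties exposes the OrchestrationMode field as it does in recent compute API versions:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	vmss := compute.VirtualMachineScaleSet{
		Location: to.StringPtr("eastus"),
		VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{
			// Flexible relaxes the identical-VM restriction of the
			// classic Uniform mode.
			OrchestrationMode: compute.Flexible,
		},
	}
	fmt.Println(vmss.VirtualMachineScaleSetProperties.OrchestrationMode)
}
```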
@@ -912,32 +1041,32 @@ func PossiblePrivateEndpointConnectionProvisioningStateValues() []PrivateEndpoin
type PrivateEndpointServiceConnectionStatus string
const (
- // PrivateEndpointServiceConnectionStatusApproved ...
- PrivateEndpointServiceConnectionStatusApproved PrivateEndpointServiceConnectionStatus = "Approved"
- // PrivateEndpointServiceConnectionStatusPending ...
- PrivateEndpointServiceConnectionStatusPending PrivateEndpointServiceConnectionStatus = "Pending"
- // PrivateEndpointServiceConnectionStatusRejected ...
- PrivateEndpointServiceConnectionStatusRejected PrivateEndpointServiceConnectionStatus = "Rejected"
+ // Approved ...
+ Approved PrivateEndpointServiceConnectionStatus = "Approved"
+ // Pending ...
+ Pending PrivateEndpointServiceConnectionStatus = "Pending"
+ // Rejected ...
+ Rejected PrivateEndpointServiceConnectionStatus = "Rejected"
)
// PossiblePrivateEndpointServiceConnectionStatusValues returns an array of possible values for the PrivateEndpointServiceConnectionStatus const type.
func PossiblePrivateEndpointServiceConnectionStatusValues() []PrivateEndpointServiceConnectionStatus {
- return []PrivateEndpointServiceConnectionStatus{PrivateEndpointServiceConnectionStatusApproved, PrivateEndpointServiceConnectionStatusPending, PrivateEndpointServiceConnectionStatusRejected}
+ return []PrivateEndpointServiceConnectionStatus{Approved, Pending, Rejected}
}
// ProtocolTypes enumerates the values for protocol types.
type ProtocolTypes string
const (
- // ProtocolTypesHTTP ...
- ProtocolTypesHTTP ProtocolTypes = "Http"
- // ProtocolTypesHTTPS ...
- ProtocolTypesHTTPS ProtocolTypes = "Https"
+ // HTTP ...
+ HTTP ProtocolTypes = "Http"
+ // HTTPS ...
+ HTTPS ProtocolTypes = "Https"
)
// PossibleProtocolTypesValues returns an array of possible values for the ProtocolTypes const type.
func PossibleProtocolTypesValues() []ProtocolTypes {
- return []ProtocolTypes{ProtocolTypesHTTP, ProtocolTypesHTTPS}
+ return []ProtocolTypes{HTTP, HTTPS}
}
// ProvisioningState enumerates the values for provisioning state.
@@ -1036,15 +1165,15 @@ func PossibleProvisioningState3Values() []ProvisioningState3 {
type ProximityPlacementGroupType string
const (
- // ProximityPlacementGroupTypeStandard ...
- ProximityPlacementGroupTypeStandard ProximityPlacementGroupType = "Standard"
- // ProximityPlacementGroupTypeUltra ...
- ProximityPlacementGroupTypeUltra ProximityPlacementGroupType = "Ultra"
+ // Standard ...
+ Standard ProximityPlacementGroupType = "Standard"
+ // Ultra ...
+ Ultra ProximityPlacementGroupType = "Ultra"
)
// PossibleProximityPlacementGroupTypeValues returns an array of possible values for the ProximityPlacementGroupType const type.
func PossibleProximityPlacementGroupTypeValues() []ProximityPlacementGroupType {
- return []ProximityPlacementGroupType{ProximityPlacementGroupTypeStandard, ProximityPlacementGroupTypeUltra}
+ return []ProximityPlacementGroupType{Standard, Ultra}
}
// PublicIPAddressSkuName enumerates the values for public ip address sku name.
@@ -1066,64 +1195,81 @@ func PossiblePublicIPAddressSkuNameValues() []PublicIPAddressSkuName {
type PublicIPAddressSkuTier string
const (
- // PublicIPAddressSkuTierGlobal ...
- PublicIPAddressSkuTierGlobal PublicIPAddressSkuTier = "Global"
- // PublicIPAddressSkuTierRegional ...
- PublicIPAddressSkuTierRegional PublicIPAddressSkuTier = "Regional"
+ // Global ...
+ Global PublicIPAddressSkuTier = "Global"
+ // Regional ...
+ Regional PublicIPAddressSkuTier = "Regional"
)
// PossiblePublicIPAddressSkuTierValues returns an array of possible values for the PublicIPAddressSkuTier const type.
func PossiblePublicIPAddressSkuTierValues() []PublicIPAddressSkuTier {
- return []PublicIPAddressSkuTier{PublicIPAddressSkuTierGlobal, PublicIPAddressSkuTierRegional}
+ return []PublicIPAddressSkuTier{Global, Regional}
}
// PublicIPAllocationMethod enumerates the values for public ip allocation method.
type PublicIPAllocationMethod string
const (
- // PublicIPAllocationMethodDynamic ...
- PublicIPAllocationMethodDynamic PublicIPAllocationMethod = "Dynamic"
- // PublicIPAllocationMethodStatic ...
- PublicIPAllocationMethodStatic PublicIPAllocationMethod = "Static"
+ // Dynamic ...
+ Dynamic PublicIPAllocationMethod = "Dynamic"
+ // Static ...
+ Static PublicIPAllocationMethod = "Static"
)
// PossiblePublicIPAllocationMethodValues returns an array of possible values for the PublicIPAllocationMethod const type.
func PossiblePublicIPAllocationMethodValues() []PublicIPAllocationMethod {
- return []PublicIPAllocationMethod{PublicIPAllocationMethodDynamic, PublicIPAllocationMethodStatic}
+ return []PublicIPAllocationMethod{Dynamic, Static}
}
// PublicNetworkAccess enumerates the values for public network access.
type PublicNetworkAccess string
const (
- // PublicNetworkAccessDisabled You cannot access the underlying data of the disk publicly on the internet
- // even when NetworkAccessPolicy is set to AllowAll. You can access the data via the SAS URI only from your
+ // Disabled You cannot access the underlying data of the disk publicly on the internet even when
+ // NetworkAccessPolicy is set to AllowAll. You can access the data via the SAS URI only from your trusted
+ // Azure VNET when NetworkAccessPolicy is set to AllowPrivate.
+ Disabled PublicNetworkAccess = "Disabled"
+ // Enabled You can generate a SAS URI to access the underlying data of the disk publicly on the internet
+ // when NetworkAccessPolicy is set to AllowAll. You can access the data via the SAS URI only from your
// trusted Azure VNET when NetworkAccessPolicy is set to AllowPrivate.
- PublicNetworkAccessDisabled PublicNetworkAccess = "Disabled"
- // PublicNetworkAccessEnabled You can generate a SAS URI to access the underlying data of the disk publicly
- // on the internet when NetworkAccessPolicy is set to AllowAll. You can access the data via the SAS URI
- // only from your trusted Azure VNET when NetworkAccessPolicy is set to AllowPrivate.
- PublicNetworkAccessEnabled PublicNetworkAccess = "Enabled"
+ Enabled PublicNetworkAccess = "Enabled"
)
// PossiblePublicNetworkAccessValues returns an array of possible values for the PublicNetworkAccess const type.
func PossiblePublicNetworkAccessValues() []PublicNetworkAccess {
- return []PublicNetworkAccess{PublicNetworkAccessDisabled, PublicNetworkAccessEnabled}
+ return []PublicNetworkAccess{Disabled, Enabled}
+}
+
+// RepairAction enumerates the values for repair action.
+type RepairAction string
+
+const (
+ // Reimage ...
+ Reimage RepairAction = "Reimage"
+ // Replace ...
+ Replace RepairAction = "Replace"
+ // Restart ...
+ Restart RepairAction = "Restart"
+)
+
+// PossibleRepairActionValues returns an array of possible values for the RepairAction const type.
+func PossibleRepairActionValues() []RepairAction {
+ return []RepairAction{Reimage, Replace, Restart}
}
// ReplicationMode enumerates the values for replication mode.
type ReplicationMode string
const (
- // ReplicationModeFull ...
- ReplicationModeFull ReplicationMode = "Full"
- // ReplicationModeShallow ...
- ReplicationModeShallow ReplicationMode = "Shallow"
+ // Full ...
+ Full ReplicationMode = "Full"
+ // Shallow ...
+ Shallow ReplicationMode = "Shallow"
)
// PossibleReplicationModeValues returns an array of possible values for the ReplicationMode const type.
func PossibleReplicationModeValues() []ReplicationMode {
- return []ReplicationMode{ReplicationModeFull, ReplicationModeShallow}
+ return []ReplicationMode{Full, Shallow}
}
// ReplicationState enumerates the values for replication state.
@@ -1198,58 +1344,71 @@ func PossibleResourceSkuCapacityScaleTypeValues() []ResourceSkuCapacityScaleType
type ResourceSkuRestrictionsReasonCode string
const (
- // ResourceSkuRestrictionsReasonCodeNotAvailableForSubscription ...
- ResourceSkuRestrictionsReasonCodeNotAvailableForSubscription ResourceSkuRestrictionsReasonCode = "NotAvailableForSubscription"
- // ResourceSkuRestrictionsReasonCodeQuotaID ...
- ResourceSkuRestrictionsReasonCodeQuotaID ResourceSkuRestrictionsReasonCode = "QuotaId"
+ // NotAvailableForSubscription ...
+ NotAvailableForSubscription ResourceSkuRestrictionsReasonCode = "NotAvailableForSubscription"
+ // QuotaID ...
+ QuotaID ResourceSkuRestrictionsReasonCode = "QuotaId"
)
// PossibleResourceSkuRestrictionsReasonCodeValues returns an array of possible values for the ResourceSkuRestrictionsReasonCode const type.
func PossibleResourceSkuRestrictionsReasonCodeValues() []ResourceSkuRestrictionsReasonCode {
- return []ResourceSkuRestrictionsReasonCode{ResourceSkuRestrictionsReasonCodeNotAvailableForSubscription, ResourceSkuRestrictionsReasonCodeQuotaID}
+ return []ResourceSkuRestrictionsReasonCode{NotAvailableForSubscription, QuotaID}
}
// ResourceSkuRestrictionsType enumerates the values for resource sku restrictions type.
type ResourceSkuRestrictionsType string
const (
- // ResourceSkuRestrictionsTypeLocation ...
- ResourceSkuRestrictionsTypeLocation ResourceSkuRestrictionsType = "Location"
- // ResourceSkuRestrictionsTypeZone ...
- ResourceSkuRestrictionsTypeZone ResourceSkuRestrictionsType = "Zone"
+ // Location ...
+ Location ResourceSkuRestrictionsType = "Location"
+ // Zone ...
+ Zone ResourceSkuRestrictionsType = "Zone"
)
// PossibleResourceSkuRestrictionsTypeValues returns an array of possible values for the ResourceSkuRestrictionsType const type.
func PossibleResourceSkuRestrictionsTypeValues() []ResourceSkuRestrictionsType {
- return []ResourceSkuRestrictionsType{ResourceSkuRestrictionsTypeLocation, ResourceSkuRestrictionsTypeZone}
+ return []ResourceSkuRestrictionsType{Location, Zone}
}
// RestorePointCollectionExpandOptions enumerates the values for restore point collection expand options.
type RestorePointCollectionExpandOptions string
const (
- // RestorePointCollectionExpandOptionsRestorePoints ...
- RestorePointCollectionExpandOptionsRestorePoints RestorePointCollectionExpandOptions = "restorePoints"
+ // RestorePoints ...
+ RestorePoints RestorePointCollectionExpandOptions = "restorePoints"
)
// PossibleRestorePointCollectionExpandOptionsValues returns an array of possible values for the RestorePointCollectionExpandOptions const type.
func PossibleRestorePointCollectionExpandOptionsValues() []RestorePointCollectionExpandOptions {
- return []RestorePointCollectionExpandOptions{RestorePointCollectionExpandOptionsRestorePoints}
+ return []RestorePointCollectionExpandOptions{RestorePoints}
+}
+
+// RestorePointExpandOptions enumerates the values for restore point expand options.
+type RestorePointExpandOptions string
+
+const (
+ // RestorePointExpandOptionsInstanceView ...
+ RestorePointExpandOptionsInstanceView RestorePointExpandOptions = "instanceView"
+)
+
+// PossibleRestorePointExpandOptionsValues returns an array of possible values for the RestorePointExpandOptions const type.
+func PossibleRestorePointExpandOptionsValues() []RestorePointExpandOptions {
+ return []RestorePointExpandOptions{RestorePointExpandOptionsInstanceView}
}
// RollingUpgradeActionType enumerates the values for rolling upgrade action type.
type RollingUpgradeActionType string
const (
- // RollingUpgradeActionTypeCancel ...
- RollingUpgradeActionTypeCancel RollingUpgradeActionType = "Cancel"
- // RollingUpgradeActionTypeStart ...
- RollingUpgradeActionTypeStart RollingUpgradeActionType = "Start"
+ // Cancel ...
+ Cancel RollingUpgradeActionType = "Cancel"
+ // Start ...
+ Start RollingUpgradeActionType = "Start"
)
// PossibleRollingUpgradeActionTypeValues returns an array of possible values for the RollingUpgradeActionType const type.
func PossibleRollingUpgradeActionTypeValues() []RollingUpgradeActionType {
- return []RollingUpgradeActionType{RollingUpgradeActionTypeCancel, RollingUpgradeActionTypeStart}
+ return []RollingUpgradeActionType{Cancel, Start}
}
// RollingUpgradeStatusCode enumerates the values for rolling upgrade status code.
@@ -1271,90 +1430,130 @@ func PossibleRollingUpgradeStatusCodeValues() []RollingUpgradeStatusCode {
return []RollingUpgradeStatusCode{RollingUpgradeStatusCodeCancelled, RollingUpgradeStatusCodeCompleted, RollingUpgradeStatusCodeFaulted, RollingUpgradeStatusCodeRollingForward}
}
+// SecurityEncryptionTypes enumerates the values for security encryption types.
+type SecurityEncryptionTypes string
+
+const (
+ // DiskWithVMGuestState ...
+ DiskWithVMGuestState SecurityEncryptionTypes = "DiskWithVMGuestState"
+ // VMGuestStateOnly ...
+ VMGuestStateOnly SecurityEncryptionTypes = "VMGuestStateOnly"
+)
+
+// PossibleSecurityEncryptionTypesValues returns an array of possible values for the SecurityEncryptionTypes const type.
+func PossibleSecurityEncryptionTypesValues() []SecurityEncryptionTypes {
+ return []SecurityEncryptionTypes{DiskWithVMGuestState, VMGuestStateOnly}
+}
+
// SecurityTypes enumerates the values for security types.
type SecurityTypes string
const (
+ // SecurityTypesConfidentialVM ...
+ SecurityTypesConfidentialVM SecurityTypes = "ConfidentialVM"
// SecurityTypesTrustedLaunch ...
SecurityTypesTrustedLaunch SecurityTypes = "TrustedLaunch"
)
// PossibleSecurityTypesValues returns an array of possible values for the SecurityTypes const type.
func PossibleSecurityTypesValues() []SecurityTypes {
- return []SecurityTypes{SecurityTypesTrustedLaunch}
+ return []SecurityTypes{SecurityTypesConfidentialVM, SecurityTypesTrustedLaunch}
}
// SelectPermissions enumerates the values for select permissions.
type SelectPermissions string
const (
- // SelectPermissionsPermissions ...
- SelectPermissionsPermissions SelectPermissions = "Permissions"
+ // Permissions ...
+ Permissions SelectPermissions = "Permissions"
)
// PossibleSelectPermissionsValues returns an array of possible values for the SelectPermissions const type.
func PossibleSelectPermissionsValues() []SelectPermissions {
- return []SelectPermissions{SelectPermissionsPermissions}
+ return []SelectPermissions{Permissions}
}
// SettingNames enumerates the values for setting names.
type SettingNames string
const (
- // SettingNamesAutoLogon ...
- SettingNamesAutoLogon SettingNames = "AutoLogon"
- // SettingNamesFirstLogonCommands ...
- SettingNamesFirstLogonCommands SettingNames = "FirstLogonCommands"
+ // AutoLogon ...
+ AutoLogon SettingNames = "AutoLogon"
+ // FirstLogonCommands ...
+ FirstLogonCommands SettingNames = "FirstLogonCommands"
)
// PossibleSettingNamesValues returns an array of possible values for the SettingNames const type.
func PossibleSettingNamesValues() []SettingNames {
- return []SettingNames{SettingNamesAutoLogon, SettingNamesFirstLogonCommands}
+ return []SettingNames{AutoLogon, FirstLogonCommands}
}
// SharedToValues enumerates the values for shared to values.
type SharedToValues string
const (
- // SharedToValuesTenant ...
- SharedToValuesTenant SharedToValues = "tenant"
+ // Tenant ...
+ Tenant SharedToValues = "tenant"
)
// PossibleSharedToValuesValues returns an array of possible values for the SharedToValues const type.
func PossibleSharedToValuesValues() []SharedToValues {
- return []SharedToValues{SharedToValuesTenant}
+ return []SharedToValues{Tenant}
}
// SharingProfileGroupTypes enumerates the values for sharing profile group types.
type SharingProfileGroupTypes string
const (
- // SharingProfileGroupTypesAADTenants ...
- SharingProfileGroupTypesAADTenants SharingProfileGroupTypes = "AADTenants"
- // SharingProfileGroupTypesSubscriptions ...
- SharingProfileGroupTypesSubscriptions SharingProfileGroupTypes = "Subscriptions"
+ // AADTenants ...
+ AADTenants SharingProfileGroupTypes = "AADTenants"
+ // Community ...
+ Community SharingProfileGroupTypes = "Community"
+ // Subscriptions ...
+ Subscriptions SharingProfileGroupTypes = "Subscriptions"
)
// PossibleSharingProfileGroupTypesValues returns an array of possible values for the SharingProfileGroupTypes const type.
func PossibleSharingProfileGroupTypesValues() []SharingProfileGroupTypes {
- return []SharingProfileGroupTypes{SharingProfileGroupTypesAADTenants, SharingProfileGroupTypesSubscriptions}
+ return []SharingProfileGroupTypes{AADTenants, Community, Subscriptions}
+}
+
+// SharingState enumerates the values for sharing state.
+type SharingState string
+
+const (
+ // SharingStateFailed ...
+ SharingStateFailed SharingState = "Failed"
+ // SharingStateInProgress ...
+ SharingStateInProgress SharingState = "InProgress"
+ // SharingStateSucceeded ...
+ SharingStateSucceeded SharingState = "Succeeded"
+ // SharingStateUnknown ...
+ SharingStateUnknown SharingState = "Unknown"
+)
+
+// PossibleSharingStateValues returns an array of possible values for the SharingState const type.
+func PossibleSharingStateValues() []SharingState {
+ return []SharingState{SharingStateFailed, SharingStateInProgress, SharingStateSucceeded, SharingStateUnknown}
}
// SharingUpdateOperationTypes enumerates the values for sharing update operation types.
type SharingUpdateOperationTypes string
const (
- // SharingUpdateOperationTypesAdd ...
- SharingUpdateOperationTypesAdd SharingUpdateOperationTypes = "Add"
- // SharingUpdateOperationTypesRemove ...
- SharingUpdateOperationTypesRemove SharingUpdateOperationTypes = "Remove"
- // SharingUpdateOperationTypesReset ...
- SharingUpdateOperationTypesReset SharingUpdateOperationTypes = "Reset"
+ // Add ...
+ Add SharingUpdateOperationTypes = "Add"
+ // EnableCommunity ...
+ EnableCommunity SharingUpdateOperationTypes = "EnableCommunity"
+ // Remove ...
+ Remove SharingUpdateOperationTypes = "Remove"
+ // Reset ...
+ Reset SharingUpdateOperationTypes = "Reset"
)
// PossibleSharingUpdateOperationTypesValues returns an array of possible values for the SharingUpdateOperationTypes const type.
func PossibleSharingUpdateOperationTypesValues() []SharingUpdateOperationTypes {
- return []SharingUpdateOperationTypes{SharingUpdateOperationTypesAdd, SharingUpdateOperationTypesRemove, SharingUpdateOperationTypesReset}
+ return []SharingUpdateOperationTypes{Add, EnableCommunity, Remove, Reset}
}
// SnapshotStorageAccountTypes enumerates the values for snapshot storage account types.
@@ -1378,17 +1577,17 @@ func PossibleSnapshotStorageAccountTypesValues() []SnapshotStorageAccountTypes {
type StatusLevelTypes string
const (
- // StatusLevelTypesError ...
- StatusLevelTypesError StatusLevelTypes = "Error"
- // StatusLevelTypesInfo ...
- StatusLevelTypesInfo StatusLevelTypes = "Info"
- // StatusLevelTypesWarning ...
- StatusLevelTypesWarning StatusLevelTypes = "Warning"
+ // Error ...
+ Error StatusLevelTypes = "Error"
+ // Info ...
+ Info StatusLevelTypes = "Info"
+ // Warning ...
+ Warning StatusLevelTypes = "Warning"
)
// PossibleStatusLevelTypesValues returns an array of possible values for the StatusLevelTypes const type.
func PossibleStatusLevelTypesValues() []StatusLevelTypes {
- return []StatusLevelTypes{StatusLevelTypesError, StatusLevelTypesInfo, StatusLevelTypesWarning}
+ return []StatusLevelTypes{Error, Info, Warning}
}
// StorageAccountType enumerates the values for storage account type.
@@ -1414,6 +1613,8 @@ type StorageAccountTypes string
const (
// StorageAccountTypesPremiumLRS ...
StorageAccountTypesPremiumLRS StorageAccountTypes = "Premium_LRS"
+ // StorageAccountTypesPremiumV2LRS ...
+ StorageAccountTypesPremiumV2LRS StorageAccountTypes = "PremiumV2_LRS"
// StorageAccountTypesPremiumZRS ...
StorageAccountTypesPremiumZRS StorageAccountTypes = "Premium_ZRS"
// StorageAccountTypesStandardLRS ...
@@ -1428,7 +1629,7 @@ const (
// PossibleStorageAccountTypesValues returns an array of possible values for the StorageAccountTypes const type.
func PossibleStorageAccountTypesValues() []StorageAccountTypes {
- return []StorageAccountTypes{StorageAccountTypesPremiumLRS, StorageAccountTypesPremiumZRS, StorageAccountTypesStandardLRS, StorageAccountTypesStandardSSDLRS, StorageAccountTypesStandardSSDZRS, StorageAccountTypesUltraSSDLRS}
+ return []StorageAccountTypes{StorageAccountTypesPremiumLRS, StorageAccountTypesPremiumV2LRS, StorageAccountTypesPremiumZRS, StorageAccountTypesStandardLRS, StorageAccountTypesStandardSSDLRS, StorageAccountTypesStandardSSDZRS, StorageAccountTypesUltraSSDLRS}
}
// UpgradeMode enumerates the values for upgrade mode.
@@ -1503,34 +1704,34 @@ func PossibleVirtualMachineEvictionPolicyTypesValues() []VirtualMachineEvictionP
type VirtualMachinePriorityTypes string
const (
- // VirtualMachinePriorityTypesLow ...
- VirtualMachinePriorityTypesLow VirtualMachinePriorityTypes = "Low"
- // VirtualMachinePriorityTypesRegular ...
- VirtualMachinePriorityTypesRegular VirtualMachinePriorityTypes = "Regular"
- // VirtualMachinePriorityTypesSpot ...
- VirtualMachinePriorityTypesSpot VirtualMachinePriorityTypes = "Spot"
+ // Low ...
+ Low VirtualMachinePriorityTypes = "Low"
+ // Regular ...
+ Regular VirtualMachinePriorityTypes = "Regular"
+ // Spot ...
+ Spot VirtualMachinePriorityTypes = "Spot"
)
// PossibleVirtualMachinePriorityTypesValues returns an array of possible values for the VirtualMachinePriorityTypes const type.
func PossibleVirtualMachinePriorityTypesValues() []VirtualMachinePriorityTypes {
- return []VirtualMachinePriorityTypes{VirtualMachinePriorityTypesLow, VirtualMachinePriorityTypesRegular, VirtualMachinePriorityTypesSpot}
+ return []VirtualMachinePriorityTypes{Low, Regular, Spot}
}
// VirtualMachineScaleSetScaleInRules enumerates the values for virtual machine scale set scale in rules.
type VirtualMachineScaleSetScaleInRules string
const (
- // VirtualMachineScaleSetScaleInRulesDefault ...
- VirtualMachineScaleSetScaleInRulesDefault VirtualMachineScaleSetScaleInRules = "Default"
- // VirtualMachineScaleSetScaleInRulesNewestVM ...
- VirtualMachineScaleSetScaleInRulesNewestVM VirtualMachineScaleSetScaleInRules = "NewestVM"
- // VirtualMachineScaleSetScaleInRulesOldestVM ...
- VirtualMachineScaleSetScaleInRulesOldestVM VirtualMachineScaleSetScaleInRules = "OldestVM"
+ // Default ...
+ Default VirtualMachineScaleSetScaleInRules = "Default"
+ // NewestVM ...
+ NewestVM VirtualMachineScaleSetScaleInRules = "NewestVM"
+ // OldestVM ...
+ OldestVM VirtualMachineScaleSetScaleInRules = "OldestVM"
)
// PossibleVirtualMachineScaleSetScaleInRulesValues returns an array of possible values for the VirtualMachineScaleSetScaleInRules const type.
func PossibleVirtualMachineScaleSetScaleInRulesValues() []VirtualMachineScaleSetScaleInRules {
- return []VirtualMachineScaleSetScaleInRules{VirtualMachineScaleSetScaleInRulesDefault, VirtualMachineScaleSetScaleInRulesNewestVM, VirtualMachineScaleSetScaleInRulesOldestVM}
+ return []VirtualMachineScaleSetScaleInRules{Default, NewestVM, OldestVM}
}
// VirtualMachineScaleSetSkuScaleType enumerates the values for virtual machine scale set sku scale type.
@@ -1552,343 +1753,343 @@ func PossibleVirtualMachineScaleSetSkuScaleTypeValues() []VirtualMachineScaleSet
type VirtualMachineSizeTypes string
const (
- // VirtualMachineSizeTypesBasicA0 ...
- VirtualMachineSizeTypesBasicA0 VirtualMachineSizeTypes = "Basic_A0"
- // VirtualMachineSizeTypesBasicA1 ...
- VirtualMachineSizeTypesBasicA1 VirtualMachineSizeTypes = "Basic_A1"
- // VirtualMachineSizeTypesBasicA2 ...
- VirtualMachineSizeTypesBasicA2 VirtualMachineSizeTypes = "Basic_A2"
- // VirtualMachineSizeTypesBasicA3 ...
- VirtualMachineSizeTypesBasicA3 VirtualMachineSizeTypes = "Basic_A3"
- // VirtualMachineSizeTypesBasicA4 ...
- VirtualMachineSizeTypesBasicA4 VirtualMachineSizeTypes = "Basic_A4"
- // VirtualMachineSizeTypesStandardA0 ...
- VirtualMachineSizeTypesStandardA0 VirtualMachineSizeTypes = "Standard_A0"
- // VirtualMachineSizeTypesStandardA1 ...
- VirtualMachineSizeTypesStandardA1 VirtualMachineSizeTypes = "Standard_A1"
- // VirtualMachineSizeTypesStandardA10 ...
- VirtualMachineSizeTypesStandardA10 VirtualMachineSizeTypes = "Standard_A10"
- // VirtualMachineSizeTypesStandardA11 ...
- VirtualMachineSizeTypesStandardA11 VirtualMachineSizeTypes = "Standard_A11"
- // VirtualMachineSizeTypesStandardA1V2 ...
- VirtualMachineSizeTypesStandardA1V2 VirtualMachineSizeTypes = "Standard_A1_v2"
- // VirtualMachineSizeTypesStandardA2 ...
- VirtualMachineSizeTypesStandardA2 VirtualMachineSizeTypes = "Standard_A2"
- // VirtualMachineSizeTypesStandardA2mV2 ...
- VirtualMachineSizeTypesStandardA2mV2 VirtualMachineSizeTypes = "Standard_A2m_v2"
- // VirtualMachineSizeTypesStandardA2V2 ...
- VirtualMachineSizeTypesStandardA2V2 VirtualMachineSizeTypes = "Standard_A2_v2"
- // VirtualMachineSizeTypesStandardA3 ...
- VirtualMachineSizeTypesStandardA3 VirtualMachineSizeTypes = "Standard_A3"
- // VirtualMachineSizeTypesStandardA4 ...
- VirtualMachineSizeTypesStandardA4 VirtualMachineSizeTypes = "Standard_A4"
- // VirtualMachineSizeTypesStandardA4mV2 ...
- VirtualMachineSizeTypesStandardA4mV2 VirtualMachineSizeTypes = "Standard_A4m_v2"
- // VirtualMachineSizeTypesStandardA4V2 ...
- VirtualMachineSizeTypesStandardA4V2 VirtualMachineSizeTypes = "Standard_A4_v2"
- // VirtualMachineSizeTypesStandardA5 ...
- VirtualMachineSizeTypesStandardA5 VirtualMachineSizeTypes = "Standard_A5"
- // VirtualMachineSizeTypesStandardA6 ...
- VirtualMachineSizeTypesStandardA6 VirtualMachineSizeTypes = "Standard_A6"
- // VirtualMachineSizeTypesStandardA7 ...
- VirtualMachineSizeTypesStandardA7 VirtualMachineSizeTypes = "Standard_A7"
- // VirtualMachineSizeTypesStandardA8 ...
- VirtualMachineSizeTypesStandardA8 VirtualMachineSizeTypes = "Standard_A8"
- // VirtualMachineSizeTypesStandardA8mV2 ...
- VirtualMachineSizeTypesStandardA8mV2 VirtualMachineSizeTypes = "Standard_A8m_v2"
- // VirtualMachineSizeTypesStandardA8V2 ...
- VirtualMachineSizeTypesStandardA8V2 VirtualMachineSizeTypes = "Standard_A8_v2"
- // VirtualMachineSizeTypesStandardA9 ...
- VirtualMachineSizeTypesStandardA9 VirtualMachineSizeTypes = "Standard_A9"
- // VirtualMachineSizeTypesStandardB1ms ...
- VirtualMachineSizeTypesStandardB1ms VirtualMachineSizeTypes = "Standard_B1ms"
- // VirtualMachineSizeTypesStandardB1s ...
- VirtualMachineSizeTypesStandardB1s VirtualMachineSizeTypes = "Standard_B1s"
- // VirtualMachineSizeTypesStandardB2ms ...
- VirtualMachineSizeTypesStandardB2ms VirtualMachineSizeTypes = "Standard_B2ms"
- // VirtualMachineSizeTypesStandardB2s ...
- VirtualMachineSizeTypesStandardB2s VirtualMachineSizeTypes = "Standard_B2s"
- // VirtualMachineSizeTypesStandardB4ms ...
- VirtualMachineSizeTypesStandardB4ms VirtualMachineSizeTypes = "Standard_B4ms"
- // VirtualMachineSizeTypesStandardB8ms ...
- VirtualMachineSizeTypesStandardB8ms VirtualMachineSizeTypes = "Standard_B8ms"
- // VirtualMachineSizeTypesStandardD1 ...
- VirtualMachineSizeTypesStandardD1 VirtualMachineSizeTypes = "Standard_D1"
- // VirtualMachineSizeTypesStandardD11 ...
- VirtualMachineSizeTypesStandardD11 VirtualMachineSizeTypes = "Standard_D11"
- // VirtualMachineSizeTypesStandardD11V2 ...
- VirtualMachineSizeTypesStandardD11V2 VirtualMachineSizeTypes = "Standard_D11_v2"
- // VirtualMachineSizeTypesStandardD12 ...
- VirtualMachineSizeTypesStandardD12 VirtualMachineSizeTypes = "Standard_D12"
- // VirtualMachineSizeTypesStandardD12V2 ...
- VirtualMachineSizeTypesStandardD12V2 VirtualMachineSizeTypes = "Standard_D12_v2"
- // VirtualMachineSizeTypesStandardD13 ...
- VirtualMachineSizeTypesStandardD13 VirtualMachineSizeTypes = "Standard_D13"
- // VirtualMachineSizeTypesStandardD13V2 ...
- VirtualMachineSizeTypesStandardD13V2 VirtualMachineSizeTypes = "Standard_D13_v2"
- // VirtualMachineSizeTypesStandardD14 ...
- VirtualMachineSizeTypesStandardD14 VirtualMachineSizeTypes = "Standard_D14"
- // VirtualMachineSizeTypesStandardD14V2 ...
- VirtualMachineSizeTypesStandardD14V2 VirtualMachineSizeTypes = "Standard_D14_v2"
- // VirtualMachineSizeTypesStandardD15V2 ...
- VirtualMachineSizeTypesStandardD15V2 VirtualMachineSizeTypes = "Standard_D15_v2"
- // VirtualMachineSizeTypesStandardD16sV3 ...
- VirtualMachineSizeTypesStandardD16sV3 VirtualMachineSizeTypes = "Standard_D16s_v3"
- // VirtualMachineSizeTypesStandardD16V3 ...
- VirtualMachineSizeTypesStandardD16V3 VirtualMachineSizeTypes = "Standard_D16_v3"
- // VirtualMachineSizeTypesStandardD1V2 ...
- VirtualMachineSizeTypesStandardD1V2 VirtualMachineSizeTypes = "Standard_D1_v2"
- // VirtualMachineSizeTypesStandardD2 ...
- VirtualMachineSizeTypesStandardD2 VirtualMachineSizeTypes = "Standard_D2"
- // VirtualMachineSizeTypesStandardD2sV3 ...
- VirtualMachineSizeTypesStandardD2sV3 VirtualMachineSizeTypes = "Standard_D2s_v3"
- // VirtualMachineSizeTypesStandardD2V2 ...
- VirtualMachineSizeTypesStandardD2V2 VirtualMachineSizeTypes = "Standard_D2_v2"
- // VirtualMachineSizeTypesStandardD2V3 ...
- VirtualMachineSizeTypesStandardD2V3 VirtualMachineSizeTypes = "Standard_D2_v3"
- // VirtualMachineSizeTypesStandardD3 ...
- VirtualMachineSizeTypesStandardD3 VirtualMachineSizeTypes = "Standard_D3"
- // VirtualMachineSizeTypesStandardD32sV3 ...
- VirtualMachineSizeTypesStandardD32sV3 VirtualMachineSizeTypes = "Standard_D32s_v3"
- // VirtualMachineSizeTypesStandardD32V3 ...
- VirtualMachineSizeTypesStandardD32V3 VirtualMachineSizeTypes = "Standard_D32_v3"
- // VirtualMachineSizeTypesStandardD3V2 ...
- VirtualMachineSizeTypesStandardD3V2 VirtualMachineSizeTypes = "Standard_D3_v2"
- // VirtualMachineSizeTypesStandardD4 ...
- VirtualMachineSizeTypesStandardD4 VirtualMachineSizeTypes = "Standard_D4"
- // VirtualMachineSizeTypesStandardD4sV3 ...
- VirtualMachineSizeTypesStandardD4sV3 VirtualMachineSizeTypes = "Standard_D4s_v3"
- // VirtualMachineSizeTypesStandardD4V2 ...
- VirtualMachineSizeTypesStandardD4V2 VirtualMachineSizeTypes = "Standard_D4_v2"
- // VirtualMachineSizeTypesStandardD4V3 ...
- VirtualMachineSizeTypesStandardD4V3 VirtualMachineSizeTypes = "Standard_D4_v3"
- // VirtualMachineSizeTypesStandardD5V2 ...
- VirtualMachineSizeTypesStandardD5V2 VirtualMachineSizeTypes = "Standard_D5_v2"
- // VirtualMachineSizeTypesStandardD64sV3 ...
- VirtualMachineSizeTypesStandardD64sV3 VirtualMachineSizeTypes = "Standard_D64s_v3"
- // VirtualMachineSizeTypesStandardD64V3 ...
- VirtualMachineSizeTypesStandardD64V3 VirtualMachineSizeTypes = "Standard_D64_v3"
- // VirtualMachineSizeTypesStandardD8sV3 ...
- VirtualMachineSizeTypesStandardD8sV3 VirtualMachineSizeTypes = "Standard_D8s_v3"
- // VirtualMachineSizeTypesStandardD8V3 ...
- VirtualMachineSizeTypesStandardD8V3 VirtualMachineSizeTypes = "Standard_D8_v3"
- // VirtualMachineSizeTypesStandardDS1 ...
- VirtualMachineSizeTypesStandardDS1 VirtualMachineSizeTypes = "Standard_DS1"
- // VirtualMachineSizeTypesStandardDS11 ...
- VirtualMachineSizeTypesStandardDS11 VirtualMachineSizeTypes = "Standard_DS11"
- // VirtualMachineSizeTypesStandardDS11V2 ...
- VirtualMachineSizeTypesStandardDS11V2 VirtualMachineSizeTypes = "Standard_DS11_v2"
- // VirtualMachineSizeTypesStandardDS12 ...
- VirtualMachineSizeTypesStandardDS12 VirtualMachineSizeTypes = "Standard_DS12"
- // VirtualMachineSizeTypesStandardDS12V2 ...
- VirtualMachineSizeTypesStandardDS12V2 VirtualMachineSizeTypes = "Standard_DS12_v2"
- // VirtualMachineSizeTypesStandardDS13 ...
- VirtualMachineSizeTypesStandardDS13 VirtualMachineSizeTypes = "Standard_DS13"
- // VirtualMachineSizeTypesStandardDS132V2 ...
- VirtualMachineSizeTypesStandardDS132V2 VirtualMachineSizeTypes = "Standard_DS13-2_v2"
- // VirtualMachineSizeTypesStandardDS134V2 ...
- VirtualMachineSizeTypesStandardDS134V2 VirtualMachineSizeTypes = "Standard_DS13-4_v2"
- // VirtualMachineSizeTypesStandardDS13V2 ...
- VirtualMachineSizeTypesStandardDS13V2 VirtualMachineSizeTypes = "Standard_DS13_v2"
- // VirtualMachineSizeTypesStandardDS14 ...
- VirtualMachineSizeTypesStandardDS14 VirtualMachineSizeTypes = "Standard_DS14"
- // VirtualMachineSizeTypesStandardDS144V2 ...
- VirtualMachineSizeTypesStandardDS144V2 VirtualMachineSizeTypes = "Standard_DS14-4_v2"
- // VirtualMachineSizeTypesStandardDS148V2 ...
- VirtualMachineSizeTypesStandardDS148V2 VirtualMachineSizeTypes = "Standard_DS14-8_v2"
- // VirtualMachineSizeTypesStandardDS14V2 ...
- VirtualMachineSizeTypesStandardDS14V2 VirtualMachineSizeTypes = "Standard_DS14_v2"
- // VirtualMachineSizeTypesStandardDS15V2 ...
- VirtualMachineSizeTypesStandardDS15V2 VirtualMachineSizeTypes = "Standard_DS15_v2"
- // VirtualMachineSizeTypesStandardDS1V2 ...
- VirtualMachineSizeTypesStandardDS1V2 VirtualMachineSizeTypes = "Standard_DS1_v2"
- // VirtualMachineSizeTypesStandardDS2 ...
- VirtualMachineSizeTypesStandardDS2 VirtualMachineSizeTypes = "Standard_DS2"
- // VirtualMachineSizeTypesStandardDS2V2 ...
- VirtualMachineSizeTypesStandardDS2V2 VirtualMachineSizeTypes = "Standard_DS2_v2"
- // VirtualMachineSizeTypesStandardDS3 ...
- VirtualMachineSizeTypesStandardDS3 VirtualMachineSizeTypes = "Standard_DS3"
- // VirtualMachineSizeTypesStandardDS3V2 ...
- VirtualMachineSizeTypesStandardDS3V2 VirtualMachineSizeTypes = "Standard_DS3_v2"
- // VirtualMachineSizeTypesStandardDS4 ...
- VirtualMachineSizeTypesStandardDS4 VirtualMachineSizeTypes = "Standard_DS4"
- // VirtualMachineSizeTypesStandardDS4V2 ...
- VirtualMachineSizeTypesStandardDS4V2 VirtualMachineSizeTypes = "Standard_DS4_v2"
- // VirtualMachineSizeTypesStandardDS5V2 ...
- VirtualMachineSizeTypesStandardDS5V2 VirtualMachineSizeTypes = "Standard_DS5_v2"
- // VirtualMachineSizeTypesStandardE16sV3 ...
- VirtualMachineSizeTypesStandardE16sV3 VirtualMachineSizeTypes = "Standard_E16s_v3"
- // VirtualMachineSizeTypesStandardE16V3 ...
- VirtualMachineSizeTypesStandardE16V3 VirtualMachineSizeTypes = "Standard_E16_v3"
- // VirtualMachineSizeTypesStandardE2sV3 ...
- VirtualMachineSizeTypesStandardE2sV3 VirtualMachineSizeTypes = "Standard_E2s_v3"
- // VirtualMachineSizeTypesStandardE2V3 ...
- VirtualMachineSizeTypesStandardE2V3 VirtualMachineSizeTypes = "Standard_E2_v3"
- // VirtualMachineSizeTypesStandardE3216V3 ...
- VirtualMachineSizeTypesStandardE3216V3 VirtualMachineSizeTypes = "Standard_E32-16_v3"
- // VirtualMachineSizeTypesStandardE328sV3 ...
- VirtualMachineSizeTypesStandardE328sV3 VirtualMachineSizeTypes = "Standard_E32-8s_v3"
- // VirtualMachineSizeTypesStandardE32sV3 ...
- VirtualMachineSizeTypesStandardE32sV3 VirtualMachineSizeTypes = "Standard_E32s_v3"
- // VirtualMachineSizeTypesStandardE32V3 ...
- VirtualMachineSizeTypesStandardE32V3 VirtualMachineSizeTypes = "Standard_E32_v3"
- // VirtualMachineSizeTypesStandardE4sV3 ...
- VirtualMachineSizeTypesStandardE4sV3 VirtualMachineSizeTypes = "Standard_E4s_v3"
- // VirtualMachineSizeTypesStandardE4V3 ...
- VirtualMachineSizeTypesStandardE4V3 VirtualMachineSizeTypes = "Standard_E4_v3"
- // VirtualMachineSizeTypesStandardE6416sV3 ...
- VirtualMachineSizeTypesStandardE6416sV3 VirtualMachineSizeTypes = "Standard_E64-16s_v3"
- // VirtualMachineSizeTypesStandardE6432sV3 ...
- VirtualMachineSizeTypesStandardE6432sV3 VirtualMachineSizeTypes = "Standard_E64-32s_v3"
- // VirtualMachineSizeTypesStandardE64sV3 ...
- VirtualMachineSizeTypesStandardE64sV3 VirtualMachineSizeTypes = "Standard_E64s_v3"
- // VirtualMachineSizeTypesStandardE64V3 ...
- VirtualMachineSizeTypesStandardE64V3 VirtualMachineSizeTypes = "Standard_E64_v3"
- // VirtualMachineSizeTypesStandardE8sV3 ...
- VirtualMachineSizeTypesStandardE8sV3 VirtualMachineSizeTypes = "Standard_E8s_v3"
- // VirtualMachineSizeTypesStandardE8V3 ...
- VirtualMachineSizeTypesStandardE8V3 VirtualMachineSizeTypes = "Standard_E8_v3"
- // VirtualMachineSizeTypesStandardF1 ...
- VirtualMachineSizeTypesStandardF1 VirtualMachineSizeTypes = "Standard_F1"
- // VirtualMachineSizeTypesStandardF16 ...
- VirtualMachineSizeTypesStandardF16 VirtualMachineSizeTypes = "Standard_F16"
- // VirtualMachineSizeTypesStandardF16s ...
- VirtualMachineSizeTypesStandardF16s VirtualMachineSizeTypes = "Standard_F16s"
- // VirtualMachineSizeTypesStandardF16sV2 ...
- VirtualMachineSizeTypesStandardF16sV2 VirtualMachineSizeTypes = "Standard_F16s_v2"
- // VirtualMachineSizeTypesStandardF1s ...
- VirtualMachineSizeTypesStandardF1s VirtualMachineSizeTypes = "Standard_F1s"
- // VirtualMachineSizeTypesStandardF2 ...
- VirtualMachineSizeTypesStandardF2 VirtualMachineSizeTypes = "Standard_F2"
- // VirtualMachineSizeTypesStandardF2s ...
- VirtualMachineSizeTypesStandardF2s VirtualMachineSizeTypes = "Standard_F2s"
- // VirtualMachineSizeTypesStandardF2sV2 ...
- VirtualMachineSizeTypesStandardF2sV2 VirtualMachineSizeTypes = "Standard_F2s_v2"
- // VirtualMachineSizeTypesStandardF32sV2 ...
- VirtualMachineSizeTypesStandardF32sV2 VirtualMachineSizeTypes = "Standard_F32s_v2"
- // VirtualMachineSizeTypesStandardF4 ...
- VirtualMachineSizeTypesStandardF4 VirtualMachineSizeTypes = "Standard_F4"
- // VirtualMachineSizeTypesStandardF4s ...
- VirtualMachineSizeTypesStandardF4s VirtualMachineSizeTypes = "Standard_F4s"
- // VirtualMachineSizeTypesStandardF4sV2 ...
- VirtualMachineSizeTypesStandardF4sV2 VirtualMachineSizeTypes = "Standard_F4s_v2"
- // VirtualMachineSizeTypesStandardF64sV2 ...
- VirtualMachineSizeTypesStandardF64sV2 VirtualMachineSizeTypes = "Standard_F64s_v2"
- // VirtualMachineSizeTypesStandardF72sV2 ...
- VirtualMachineSizeTypesStandardF72sV2 VirtualMachineSizeTypes = "Standard_F72s_v2"
- // VirtualMachineSizeTypesStandardF8 ...
- VirtualMachineSizeTypesStandardF8 VirtualMachineSizeTypes = "Standard_F8"
- // VirtualMachineSizeTypesStandardF8s ...
- VirtualMachineSizeTypesStandardF8s VirtualMachineSizeTypes = "Standard_F8s"
- // VirtualMachineSizeTypesStandardF8sV2 ...
- VirtualMachineSizeTypesStandardF8sV2 VirtualMachineSizeTypes = "Standard_F8s_v2"
- // VirtualMachineSizeTypesStandardG1 ...
- VirtualMachineSizeTypesStandardG1 VirtualMachineSizeTypes = "Standard_G1"
- // VirtualMachineSizeTypesStandardG2 ...
- VirtualMachineSizeTypesStandardG2 VirtualMachineSizeTypes = "Standard_G2"
- // VirtualMachineSizeTypesStandardG3 ...
- VirtualMachineSizeTypesStandardG3 VirtualMachineSizeTypes = "Standard_G3"
- // VirtualMachineSizeTypesStandardG4 ...
- VirtualMachineSizeTypesStandardG4 VirtualMachineSizeTypes = "Standard_G4"
- // VirtualMachineSizeTypesStandardG5 ...
- VirtualMachineSizeTypesStandardG5 VirtualMachineSizeTypes = "Standard_G5"
- // VirtualMachineSizeTypesStandardGS1 ...
- VirtualMachineSizeTypesStandardGS1 VirtualMachineSizeTypes = "Standard_GS1"
- // VirtualMachineSizeTypesStandardGS2 ...
- VirtualMachineSizeTypesStandardGS2 VirtualMachineSizeTypes = "Standard_GS2"
- // VirtualMachineSizeTypesStandardGS3 ...
- VirtualMachineSizeTypesStandardGS3 VirtualMachineSizeTypes = "Standard_GS3"
- // VirtualMachineSizeTypesStandardGS4 ...
- VirtualMachineSizeTypesStandardGS4 VirtualMachineSizeTypes = "Standard_GS4"
- // VirtualMachineSizeTypesStandardGS44 ...
- VirtualMachineSizeTypesStandardGS44 VirtualMachineSizeTypes = "Standard_GS4-4"
- // VirtualMachineSizeTypesStandardGS48 ...
- VirtualMachineSizeTypesStandardGS48 VirtualMachineSizeTypes = "Standard_GS4-8"
- // VirtualMachineSizeTypesStandardGS5 ...
- VirtualMachineSizeTypesStandardGS5 VirtualMachineSizeTypes = "Standard_GS5"
- // VirtualMachineSizeTypesStandardGS516 ...
- VirtualMachineSizeTypesStandardGS516 VirtualMachineSizeTypes = "Standard_GS5-16"
- // VirtualMachineSizeTypesStandardGS58 ...
- VirtualMachineSizeTypesStandardGS58 VirtualMachineSizeTypes = "Standard_GS5-8"
- // VirtualMachineSizeTypesStandardH16 ...
- VirtualMachineSizeTypesStandardH16 VirtualMachineSizeTypes = "Standard_H16"
- // VirtualMachineSizeTypesStandardH16m ...
- VirtualMachineSizeTypesStandardH16m VirtualMachineSizeTypes = "Standard_H16m"
- // VirtualMachineSizeTypesStandardH16mr ...
- VirtualMachineSizeTypesStandardH16mr VirtualMachineSizeTypes = "Standard_H16mr"
- // VirtualMachineSizeTypesStandardH16r ...
- VirtualMachineSizeTypesStandardH16r VirtualMachineSizeTypes = "Standard_H16r"
- // VirtualMachineSizeTypesStandardH8 ...
- VirtualMachineSizeTypesStandardH8 VirtualMachineSizeTypes = "Standard_H8"
- // VirtualMachineSizeTypesStandardH8m ...
- VirtualMachineSizeTypesStandardH8m VirtualMachineSizeTypes = "Standard_H8m"
- // VirtualMachineSizeTypesStandardL16s ...
- VirtualMachineSizeTypesStandardL16s VirtualMachineSizeTypes = "Standard_L16s"
- // VirtualMachineSizeTypesStandardL32s ...
- VirtualMachineSizeTypesStandardL32s VirtualMachineSizeTypes = "Standard_L32s"
- // VirtualMachineSizeTypesStandardL4s ...
- VirtualMachineSizeTypesStandardL4s VirtualMachineSizeTypes = "Standard_L4s"
- // VirtualMachineSizeTypesStandardL8s ...
- VirtualMachineSizeTypesStandardL8s VirtualMachineSizeTypes = "Standard_L8s"
- // VirtualMachineSizeTypesStandardM12832ms ...
- VirtualMachineSizeTypesStandardM12832ms VirtualMachineSizeTypes = "Standard_M128-32ms"
- // VirtualMachineSizeTypesStandardM12864ms ...
- VirtualMachineSizeTypesStandardM12864ms VirtualMachineSizeTypes = "Standard_M128-64ms"
- // VirtualMachineSizeTypesStandardM128ms ...
- VirtualMachineSizeTypesStandardM128ms VirtualMachineSizeTypes = "Standard_M128ms"
- // VirtualMachineSizeTypesStandardM128s ...
- VirtualMachineSizeTypesStandardM128s VirtualMachineSizeTypes = "Standard_M128s"
- // VirtualMachineSizeTypesStandardM6416ms ...
- VirtualMachineSizeTypesStandardM6416ms VirtualMachineSizeTypes = "Standard_M64-16ms"
- // VirtualMachineSizeTypesStandardM6432ms ...
- VirtualMachineSizeTypesStandardM6432ms VirtualMachineSizeTypes = "Standard_M64-32ms"
- // VirtualMachineSizeTypesStandardM64ms ...
- VirtualMachineSizeTypesStandardM64ms VirtualMachineSizeTypes = "Standard_M64ms"
- // VirtualMachineSizeTypesStandardM64s ...
- VirtualMachineSizeTypesStandardM64s VirtualMachineSizeTypes = "Standard_M64s"
- // VirtualMachineSizeTypesStandardNC12 ...
- VirtualMachineSizeTypesStandardNC12 VirtualMachineSizeTypes = "Standard_NC12"
- // VirtualMachineSizeTypesStandardNC12sV2 ...
- VirtualMachineSizeTypesStandardNC12sV2 VirtualMachineSizeTypes = "Standard_NC12s_v2"
- // VirtualMachineSizeTypesStandardNC12sV3 ...
- VirtualMachineSizeTypesStandardNC12sV3 VirtualMachineSizeTypes = "Standard_NC12s_v3"
- // VirtualMachineSizeTypesStandardNC24 ...
- VirtualMachineSizeTypesStandardNC24 VirtualMachineSizeTypes = "Standard_NC24"
- // VirtualMachineSizeTypesStandardNC24r ...
- VirtualMachineSizeTypesStandardNC24r VirtualMachineSizeTypes = "Standard_NC24r"
- // VirtualMachineSizeTypesStandardNC24rsV2 ...
- VirtualMachineSizeTypesStandardNC24rsV2 VirtualMachineSizeTypes = "Standard_NC24rs_v2"
- // VirtualMachineSizeTypesStandardNC24rsV3 ...
- VirtualMachineSizeTypesStandardNC24rsV3 VirtualMachineSizeTypes = "Standard_NC24rs_v3"
- // VirtualMachineSizeTypesStandardNC24sV2 ...
- VirtualMachineSizeTypesStandardNC24sV2 VirtualMachineSizeTypes = "Standard_NC24s_v2"
- // VirtualMachineSizeTypesStandardNC24sV3 ...
- VirtualMachineSizeTypesStandardNC24sV3 VirtualMachineSizeTypes = "Standard_NC24s_v3"
- // VirtualMachineSizeTypesStandardNC6 ...
- VirtualMachineSizeTypesStandardNC6 VirtualMachineSizeTypes = "Standard_NC6"
- // VirtualMachineSizeTypesStandardNC6sV2 ...
- VirtualMachineSizeTypesStandardNC6sV2 VirtualMachineSizeTypes = "Standard_NC6s_v2"
- // VirtualMachineSizeTypesStandardNC6sV3 ...
- VirtualMachineSizeTypesStandardNC6sV3 VirtualMachineSizeTypes = "Standard_NC6s_v3"
- // VirtualMachineSizeTypesStandardND12s ...
- VirtualMachineSizeTypesStandardND12s VirtualMachineSizeTypes = "Standard_ND12s"
- // VirtualMachineSizeTypesStandardND24rs ...
- VirtualMachineSizeTypesStandardND24rs VirtualMachineSizeTypes = "Standard_ND24rs"
- // VirtualMachineSizeTypesStandardND24s ...
- VirtualMachineSizeTypesStandardND24s VirtualMachineSizeTypes = "Standard_ND24s"
- // VirtualMachineSizeTypesStandardND6s ...
- VirtualMachineSizeTypesStandardND6s VirtualMachineSizeTypes = "Standard_ND6s"
- // VirtualMachineSizeTypesStandardNV12 ...
- VirtualMachineSizeTypesStandardNV12 VirtualMachineSizeTypes = "Standard_NV12"
- // VirtualMachineSizeTypesStandardNV24 ...
- VirtualMachineSizeTypesStandardNV24 VirtualMachineSizeTypes = "Standard_NV24"
- // VirtualMachineSizeTypesStandardNV6 ...
- VirtualMachineSizeTypesStandardNV6 VirtualMachineSizeTypes = "Standard_NV6"
+ // BasicA0 ...
+ BasicA0 VirtualMachineSizeTypes = "Basic_A0"
+ // BasicA1 ...
+ BasicA1 VirtualMachineSizeTypes = "Basic_A1"
+ // BasicA2 ...
+ BasicA2 VirtualMachineSizeTypes = "Basic_A2"
+ // BasicA3 ...
+ BasicA3 VirtualMachineSizeTypes = "Basic_A3"
+ // BasicA4 ...
+ BasicA4 VirtualMachineSizeTypes = "Basic_A4"
+ // StandardA0 ...
+ StandardA0 VirtualMachineSizeTypes = "Standard_A0"
+ // StandardA1 ...
+ StandardA1 VirtualMachineSizeTypes = "Standard_A1"
+ // StandardA10 ...
+ StandardA10 VirtualMachineSizeTypes = "Standard_A10"
+ // StandardA11 ...
+ StandardA11 VirtualMachineSizeTypes = "Standard_A11"
+ // StandardA1V2 ...
+ StandardA1V2 VirtualMachineSizeTypes = "Standard_A1_v2"
+ // StandardA2 ...
+ StandardA2 VirtualMachineSizeTypes = "Standard_A2"
+ // StandardA2mV2 ...
+ StandardA2mV2 VirtualMachineSizeTypes = "Standard_A2m_v2"
+ // StandardA2V2 ...
+ StandardA2V2 VirtualMachineSizeTypes = "Standard_A2_v2"
+ // StandardA3 ...
+ StandardA3 VirtualMachineSizeTypes = "Standard_A3"
+ // StandardA4 ...
+ StandardA4 VirtualMachineSizeTypes = "Standard_A4"
+ // StandardA4mV2 ...
+ StandardA4mV2 VirtualMachineSizeTypes = "Standard_A4m_v2"
+ // StandardA4V2 ...
+ StandardA4V2 VirtualMachineSizeTypes = "Standard_A4_v2"
+ // StandardA5 ...
+ StandardA5 VirtualMachineSizeTypes = "Standard_A5"
+ // StandardA6 ...
+ StandardA6 VirtualMachineSizeTypes = "Standard_A6"
+ // StandardA7 ...
+ StandardA7 VirtualMachineSizeTypes = "Standard_A7"
+ // StandardA8 ...
+ StandardA8 VirtualMachineSizeTypes = "Standard_A8"
+ // StandardA8mV2 ...
+ StandardA8mV2 VirtualMachineSizeTypes = "Standard_A8m_v2"
+ // StandardA8V2 ...
+ StandardA8V2 VirtualMachineSizeTypes = "Standard_A8_v2"
+ // StandardA9 ...
+ StandardA9 VirtualMachineSizeTypes = "Standard_A9"
+ // StandardB1ms ...
+ StandardB1ms VirtualMachineSizeTypes = "Standard_B1ms"
+ // StandardB1s ...
+ StandardB1s VirtualMachineSizeTypes = "Standard_B1s"
+ // StandardB2ms ...
+ StandardB2ms VirtualMachineSizeTypes = "Standard_B2ms"
+ // StandardB2s ...
+ StandardB2s VirtualMachineSizeTypes = "Standard_B2s"
+ // StandardB4ms ...
+ StandardB4ms VirtualMachineSizeTypes = "Standard_B4ms"
+ // StandardB8ms ...
+ StandardB8ms VirtualMachineSizeTypes = "Standard_B8ms"
+ // StandardD1 ...
+ StandardD1 VirtualMachineSizeTypes = "Standard_D1"
+ // StandardD11 ...
+ StandardD11 VirtualMachineSizeTypes = "Standard_D11"
+ // StandardD11V2 ...
+ StandardD11V2 VirtualMachineSizeTypes = "Standard_D11_v2"
+ // StandardD12 ...
+ StandardD12 VirtualMachineSizeTypes = "Standard_D12"
+ // StandardD12V2 ...
+ StandardD12V2 VirtualMachineSizeTypes = "Standard_D12_v2"
+ // StandardD13 ...
+ StandardD13 VirtualMachineSizeTypes = "Standard_D13"
+ // StandardD13V2 ...
+ StandardD13V2 VirtualMachineSizeTypes = "Standard_D13_v2"
+ // StandardD14 ...
+ StandardD14 VirtualMachineSizeTypes = "Standard_D14"
+ // StandardD14V2 ...
+ StandardD14V2 VirtualMachineSizeTypes = "Standard_D14_v2"
+ // StandardD15V2 ...
+ StandardD15V2 VirtualMachineSizeTypes = "Standard_D15_v2"
+ // StandardD16sV3 ...
+ StandardD16sV3 VirtualMachineSizeTypes = "Standard_D16s_v3"
+ // StandardD16V3 ...
+ StandardD16V3 VirtualMachineSizeTypes = "Standard_D16_v3"
+ // StandardD1V2 ...
+ StandardD1V2 VirtualMachineSizeTypes = "Standard_D1_v2"
+ // StandardD2 ...
+ StandardD2 VirtualMachineSizeTypes = "Standard_D2"
+ // StandardD2sV3 ...
+ StandardD2sV3 VirtualMachineSizeTypes = "Standard_D2s_v3"
+ // StandardD2V2 ...
+ StandardD2V2 VirtualMachineSizeTypes = "Standard_D2_v2"
+ // StandardD2V3 ...
+ StandardD2V3 VirtualMachineSizeTypes = "Standard_D2_v3"
+ // StandardD3 ...
+ StandardD3 VirtualMachineSizeTypes = "Standard_D3"
+ // StandardD32sV3 ...
+ StandardD32sV3 VirtualMachineSizeTypes = "Standard_D32s_v3"
+ // StandardD32V3 ...
+ StandardD32V3 VirtualMachineSizeTypes = "Standard_D32_v3"
+ // StandardD3V2 ...
+ StandardD3V2 VirtualMachineSizeTypes = "Standard_D3_v2"
+ // StandardD4 ...
+ StandardD4 VirtualMachineSizeTypes = "Standard_D4"
+ // StandardD4sV3 ...
+ StandardD4sV3 VirtualMachineSizeTypes = "Standard_D4s_v3"
+ // StandardD4V2 ...
+ StandardD4V2 VirtualMachineSizeTypes = "Standard_D4_v2"
+ // StandardD4V3 ...
+ StandardD4V3 VirtualMachineSizeTypes = "Standard_D4_v3"
+ // StandardD5V2 ...
+ StandardD5V2 VirtualMachineSizeTypes = "Standard_D5_v2"
+ // StandardD64sV3 ...
+ StandardD64sV3 VirtualMachineSizeTypes = "Standard_D64s_v3"
+ // StandardD64V3 ...
+ StandardD64V3 VirtualMachineSizeTypes = "Standard_D64_v3"
+ // StandardD8sV3 ...
+ StandardD8sV3 VirtualMachineSizeTypes = "Standard_D8s_v3"
+ // StandardD8V3 ...
+ StandardD8V3 VirtualMachineSizeTypes = "Standard_D8_v3"
+ // StandardDS1 ...
+ StandardDS1 VirtualMachineSizeTypes = "Standard_DS1"
+ // StandardDS11 ...
+ StandardDS11 VirtualMachineSizeTypes = "Standard_DS11"
+ // StandardDS11V2 ...
+ StandardDS11V2 VirtualMachineSizeTypes = "Standard_DS11_v2"
+ // StandardDS12 ...
+ StandardDS12 VirtualMachineSizeTypes = "Standard_DS12"
+ // StandardDS12V2 ...
+ StandardDS12V2 VirtualMachineSizeTypes = "Standard_DS12_v2"
+ // StandardDS13 ...
+ StandardDS13 VirtualMachineSizeTypes = "Standard_DS13"
+ // StandardDS132V2 ...
+ StandardDS132V2 VirtualMachineSizeTypes = "Standard_DS13-2_v2"
+ // StandardDS134V2 ...
+ StandardDS134V2 VirtualMachineSizeTypes = "Standard_DS13-4_v2"
+ // StandardDS13V2 ...
+ StandardDS13V2 VirtualMachineSizeTypes = "Standard_DS13_v2"
+ // StandardDS14 ...
+ StandardDS14 VirtualMachineSizeTypes = "Standard_DS14"
+ // StandardDS144V2 ...
+ StandardDS144V2 VirtualMachineSizeTypes = "Standard_DS14-4_v2"
+ // StandardDS148V2 ...
+ StandardDS148V2 VirtualMachineSizeTypes = "Standard_DS14-8_v2"
+ // StandardDS14V2 ...
+ StandardDS14V2 VirtualMachineSizeTypes = "Standard_DS14_v2"
+ // StandardDS15V2 ...
+ StandardDS15V2 VirtualMachineSizeTypes = "Standard_DS15_v2"
+ // StandardDS1V2 ...
+ StandardDS1V2 VirtualMachineSizeTypes = "Standard_DS1_v2"
+ // StandardDS2 ...
+ StandardDS2 VirtualMachineSizeTypes = "Standard_DS2"
+ // StandardDS2V2 ...
+ StandardDS2V2 VirtualMachineSizeTypes = "Standard_DS2_v2"
+ // StandardDS3 ...
+ StandardDS3 VirtualMachineSizeTypes = "Standard_DS3"
+ // StandardDS3V2 ...
+ StandardDS3V2 VirtualMachineSizeTypes = "Standard_DS3_v2"
+ // StandardDS4 ...
+ StandardDS4 VirtualMachineSizeTypes = "Standard_DS4"
+ // StandardDS4V2 ...
+ StandardDS4V2 VirtualMachineSizeTypes = "Standard_DS4_v2"
+ // StandardDS5V2 ...
+ StandardDS5V2 VirtualMachineSizeTypes = "Standard_DS5_v2"
+ // StandardE16sV3 ...
+ StandardE16sV3 VirtualMachineSizeTypes = "Standard_E16s_v3"
+ // StandardE16V3 ...
+ StandardE16V3 VirtualMachineSizeTypes = "Standard_E16_v3"
+ // StandardE2sV3 ...
+ StandardE2sV3 VirtualMachineSizeTypes = "Standard_E2s_v3"
+ // StandardE2V3 ...
+ StandardE2V3 VirtualMachineSizeTypes = "Standard_E2_v3"
+ // StandardE3216V3 ...
+ StandardE3216V3 VirtualMachineSizeTypes = "Standard_E32-16_v3"
+ // StandardE328sV3 ...
+ StandardE328sV3 VirtualMachineSizeTypes = "Standard_E32-8s_v3"
+ // StandardE32sV3 ...
+ StandardE32sV3 VirtualMachineSizeTypes = "Standard_E32s_v3"
+ // StandardE32V3 ...
+ StandardE32V3 VirtualMachineSizeTypes = "Standard_E32_v3"
+ // StandardE4sV3 ...
+ StandardE4sV3 VirtualMachineSizeTypes = "Standard_E4s_v3"
+ // StandardE4V3 ...
+ StandardE4V3 VirtualMachineSizeTypes = "Standard_E4_v3"
+ // StandardE6416sV3 ...
+ StandardE6416sV3 VirtualMachineSizeTypes = "Standard_E64-16s_v3"
+ // StandardE6432sV3 ...
+ StandardE6432sV3 VirtualMachineSizeTypes = "Standard_E64-32s_v3"
+ // StandardE64sV3 ...
+ StandardE64sV3 VirtualMachineSizeTypes = "Standard_E64s_v3"
+ // StandardE64V3 ...
+ StandardE64V3 VirtualMachineSizeTypes = "Standard_E64_v3"
+ // StandardE8sV3 ...
+ StandardE8sV3 VirtualMachineSizeTypes = "Standard_E8s_v3"
+ // StandardE8V3 ...
+ StandardE8V3 VirtualMachineSizeTypes = "Standard_E8_v3"
+ // StandardF1 ...
+ StandardF1 VirtualMachineSizeTypes = "Standard_F1"
+ // StandardF16 ...
+ StandardF16 VirtualMachineSizeTypes = "Standard_F16"
+ // StandardF16s ...
+ StandardF16s VirtualMachineSizeTypes = "Standard_F16s"
+ // StandardF16sV2 ...
+ StandardF16sV2 VirtualMachineSizeTypes = "Standard_F16s_v2"
+ // StandardF1s ...
+ StandardF1s VirtualMachineSizeTypes = "Standard_F1s"
+ // StandardF2 ...
+ StandardF2 VirtualMachineSizeTypes = "Standard_F2"
+ // StandardF2s ...
+ StandardF2s VirtualMachineSizeTypes = "Standard_F2s"
+ // StandardF2sV2 ...
+ StandardF2sV2 VirtualMachineSizeTypes = "Standard_F2s_v2"
+ // StandardF32sV2 ...
+ StandardF32sV2 VirtualMachineSizeTypes = "Standard_F32s_v2"
+ // StandardF4 ...
+ StandardF4 VirtualMachineSizeTypes = "Standard_F4"
+ // StandardF4s ...
+ StandardF4s VirtualMachineSizeTypes = "Standard_F4s"
+ // StandardF4sV2 ...
+ StandardF4sV2 VirtualMachineSizeTypes = "Standard_F4s_v2"
+ // StandardF64sV2 ...
+ StandardF64sV2 VirtualMachineSizeTypes = "Standard_F64s_v2"
+ // StandardF72sV2 ...
+ StandardF72sV2 VirtualMachineSizeTypes = "Standard_F72s_v2"
+ // StandardF8 ...
+ StandardF8 VirtualMachineSizeTypes = "Standard_F8"
+ // StandardF8s ...
+ StandardF8s VirtualMachineSizeTypes = "Standard_F8s"
+ // StandardF8sV2 ...
+ StandardF8sV2 VirtualMachineSizeTypes = "Standard_F8s_v2"
+ // StandardG1 ...
+ StandardG1 VirtualMachineSizeTypes = "Standard_G1"
+ // StandardG2 ...
+ StandardG2 VirtualMachineSizeTypes = "Standard_G2"
+ // StandardG3 ...
+ StandardG3 VirtualMachineSizeTypes = "Standard_G3"
+ // StandardG4 ...
+ StandardG4 VirtualMachineSizeTypes = "Standard_G4"
+ // StandardG5 ...
+ StandardG5 VirtualMachineSizeTypes = "Standard_G5"
+ // StandardGS1 ...
+ StandardGS1 VirtualMachineSizeTypes = "Standard_GS1"
+ // StandardGS2 ...
+ StandardGS2 VirtualMachineSizeTypes = "Standard_GS2"
+ // StandardGS3 ...
+ StandardGS3 VirtualMachineSizeTypes = "Standard_GS3"
+ // StandardGS4 ...
+ StandardGS4 VirtualMachineSizeTypes = "Standard_GS4"
+ // StandardGS44 ...
+ StandardGS44 VirtualMachineSizeTypes = "Standard_GS4-4"
+ // StandardGS48 ...
+ StandardGS48 VirtualMachineSizeTypes = "Standard_GS4-8"
+ // StandardGS5 ...
+ StandardGS5 VirtualMachineSizeTypes = "Standard_GS5"
+ // StandardGS516 ...
+ StandardGS516 VirtualMachineSizeTypes = "Standard_GS5-16"
+ // StandardGS58 ...
+ StandardGS58 VirtualMachineSizeTypes = "Standard_GS5-8"
+ // StandardH16 ...
+ StandardH16 VirtualMachineSizeTypes = "Standard_H16"
+ // StandardH16m ...
+ StandardH16m VirtualMachineSizeTypes = "Standard_H16m"
+ // StandardH16mr ...
+ StandardH16mr VirtualMachineSizeTypes = "Standard_H16mr"
+ // StandardH16r ...
+ StandardH16r VirtualMachineSizeTypes = "Standard_H16r"
+ // StandardH8 ...
+ StandardH8 VirtualMachineSizeTypes = "Standard_H8"
+ // StandardH8m ...
+ StandardH8m VirtualMachineSizeTypes = "Standard_H8m"
+ // StandardL16s ...
+ StandardL16s VirtualMachineSizeTypes = "Standard_L16s"
+ // StandardL32s ...
+ StandardL32s VirtualMachineSizeTypes = "Standard_L32s"
+ // StandardL4s ...
+ StandardL4s VirtualMachineSizeTypes = "Standard_L4s"
+ // StandardL8s ...
+ StandardL8s VirtualMachineSizeTypes = "Standard_L8s"
+ // StandardM12832ms ...
+ StandardM12832ms VirtualMachineSizeTypes = "Standard_M128-32ms"
+ // StandardM12864ms ...
+ StandardM12864ms VirtualMachineSizeTypes = "Standard_M128-64ms"
+ // StandardM128ms ...
+ StandardM128ms VirtualMachineSizeTypes = "Standard_M128ms"
+ // StandardM128s ...
+ StandardM128s VirtualMachineSizeTypes = "Standard_M128s"
+ // StandardM6416ms ...
+ StandardM6416ms VirtualMachineSizeTypes = "Standard_M64-16ms"
+ // StandardM6432ms ...
+ StandardM6432ms VirtualMachineSizeTypes = "Standard_M64-32ms"
+ // StandardM64ms ...
+ StandardM64ms VirtualMachineSizeTypes = "Standard_M64ms"
+ // StandardM64s ...
+ StandardM64s VirtualMachineSizeTypes = "Standard_M64s"
+ // StandardNC12 ...
+ StandardNC12 VirtualMachineSizeTypes = "Standard_NC12"
+ // StandardNC12sV2 ...
+ StandardNC12sV2 VirtualMachineSizeTypes = "Standard_NC12s_v2"
+ // StandardNC12sV3 ...
+ StandardNC12sV3 VirtualMachineSizeTypes = "Standard_NC12s_v3"
+ // StandardNC24 ...
+ StandardNC24 VirtualMachineSizeTypes = "Standard_NC24"
+ // StandardNC24r ...
+ StandardNC24r VirtualMachineSizeTypes = "Standard_NC24r"
+ // StandardNC24rsV2 ...
+ StandardNC24rsV2 VirtualMachineSizeTypes = "Standard_NC24rs_v2"
+ // StandardNC24rsV3 ...
+ StandardNC24rsV3 VirtualMachineSizeTypes = "Standard_NC24rs_v3"
+ // StandardNC24sV2 ...
+ StandardNC24sV2 VirtualMachineSizeTypes = "Standard_NC24s_v2"
+ // StandardNC24sV3 ...
+ StandardNC24sV3 VirtualMachineSizeTypes = "Standard_NC24s_v3"
+ // StandardNC6 ...
+ StandardNC6 VirtualMachineSizeTypes = "Standard_NC6"
+ // StandardNC6sV2 ...
+ StandardNC6sV2 VirtualMachineSizeTypes = "Standard_NC6s_v2"
+ // StandardNC6sV3 ...
+ StandardNC6sV3 VirtualMachineSizeTypes = "Standard_NC6s_v3"
+ // StandardND12s ...
+ StandardND12s VirtualMachineSizeTypes = "Standard_ND12s"
+ // StandardND24rs ...
+ StandardND24rs VirtualMachineSizeTypes = "Standard_ND24rs"
+ // StandardND24s ...
+ StandardND24s VirtualMachineSizeTypes = "Standard_ND24s"
+ // StandardND6s ...
+ StandardND6s VirtualMachineSizeTypes = "Standard_ND6s"
+ // StandardNV12 ...
+ StandardNV12 VirtualMachineSizeTypes = "Standard_NV12"
+ // StandardNV24 ...
+ StandardNV24 VirtualMachineSizeTypes = "Standard_NV24"
+ // StandardNV6 ...
+ StandardNV6 VirtualMachineSizeTypes = "Standard_NV6"
)
// PossibleVirtualMachineSizeTypesValues returns an array of possible values for the VirtualMachineSizeTypes const type.
func PossibleVirtualMachineSizeTypesValues() []VirtualMachineSizeTypes {
- return []VirtualMachineSizeTypes{VirtualMachineSizeTypesBasicA0, VirtualMachineSizeTypesBasicA1, VirtualMachineSizeTypesBasicA2, VirtualMachineSizeTypesBasicA3, VirtualMachineSizeTypesBasicA4, VirtualMachineSizeTypesStandardA0, VirtualMachineSizeTypesStandardA1, VirtualMachineSizeTypesStandardA10, VirtualMachineSizeTypesStandardA11, VirtualMachineSizeTypesStandardA1V2, VirtualMachineSizeTypesStandardA2, VirtualMachineSizeTypesStandardA2mV2, VirtualMachineSizeTypesStandardA2V2, VirtualMachineSizeTypesStandardA3, VirtualMachineSizeTypesStandardA4, VirtualMachineSizeTypesStandardA4mV2, VirtualMachineSizeTypesStandardA4V2, VirtualMachineSizeTypesStandardA5, VirtualMachineSizeTypesStandardA6, VirtualMachineSizeTypesStandardA7, VirtualMachineSizeTypesStandardA8, VirtualMachineSizeTypesStandardA8mV2, VirtualMachineSizeTypesStandardA8V2, VirtualMachineSizeTypesStandardA9, VirtualMachineSizeTypesStandardB1ms, VirtualMachineSizeTypesStandardB1s, VirtualMachineSizeTypesStandardB2ms, VirtualMachineSizeTypesStandardB2s, VirtualMachineSizeTypesStandardB4ms, VirtualMachineSizeTypesStandardB8ms, VirtualMachineSizeTypesStandardD1, VirtualMachineSizeTypesStandardD11, VirtualMachineSizeTypesStandardD11V2, VirtualMachineSizeTypesStandardD12, VirtualMachineSizeTypesStandardD12V2, VirtualMachineSizeTypesStandardD13, VirtualMachineSizeTypesStandardD13V2, VirtualMachineSizeTypesStandardD14, VirtualMachineSizeTypesStandardD14V2, VirtualMachineSizeTypesStandardD15V2, VirtualMachineSizeTypesStandardD16sV3, VirtualMachineSizeTypesStandardD16V3, VirtualMachineSizeTypesStandardD1V2, VirtualMachineSizeTypesStandardD2, VirtualMachineSizeTypesStandardD2sV3, VirtualMachineSizeTypesStandardD2V2, VirtualMachineSizeTypesStandardD2V3, VirtualMachineSizeTypesStandardD3, VirtualMachineSizeTypesStandardD32sV3, VirtualMachineSizeTypesStandardD32V3, VirtualMachineSizeTypesStandardD3V2, VirtualMachineSizeTypesStandardD4, VirtualMachineSizeTypesStandardD4sV3, VirtualMachineSizeTypesStandardD4V2, VirtualMachineSizeTypesStandardD4V3, VirtualMachineSizeTypesStandardD5V2, VirtualMachineSizeTypesStandardD64sV3, VirtualMachineSizeTypesStandardD64V3, VirtualMachineSizeTypesStandardD8sV3, VirtualMachineSizeTypesStandardD8V3, VirtualMachineSizeTypesStandardDS1, VirtualMachineSizeTypesStandardDS11, VirtualMachineSizeTypesStandardDS11V2, VirtualMachineSizeTypesStandardDS12, VirtualMachineSizeTypesStandardDS12V2, VirtualMachineSizeTypesStandardDS13, VirtualMachineSizeTypesStandardDS132V2, VirtualMachineSizeTypesStandardDS134V2, VirtualMachineSizeTypesStandardDS13V2, VirtualMachineSizeTypesStandardDS14, VirtualMachineSizeTypesStandardDS144V2, VirtualMachineSizeTypesStandardDS148V2, VirtualMachineSizeTypesStandardDS14V2, VirtualMachineSizeTypesStandardDS15V2, VirtualMachineSizeTypesStandardDS1V2, VirtualMachineSizeTypesStandardDS2, VirtualMachineSizeTypesStandardDS2V2, VirtualMachineSizeTypesStandardDS3, VirtualMachineSizeTypesStandardDS3V2, VirtualMachineSizeTypesStandardDS4, VirtualMachineSizeTypesStandardDS4V2, VirtualMachineSizeTypesStandardDS5V2, VirtualMachineSizeTypesStandardE16sV3, VirtualMachineSizeTypesStandardE16V3, VirtualMachineSizeTypesStandardE2sV3, VirtualMachineSizeTypesStandardE2V3, VirtualMachineSizeTypesStandardE3216V3, VirtualMachineSizeTypesStandardE328sV3, VirtualMachineSizeTypesStandardE32sV3, VirtualMachineSizeTypesStandardE32V3, VirtualMachineSizeTypesStandardE4sV3, VirtualMachineSizeTypesStandardE4V3, VirtualMachineSizeTypesStandardE6416sV3, VirtualMachineSizeTypesStandardE6432sV3, VirtualMachineSizeTypesStandardE64sV3, VirtualMachineSizeTypesStandardE64V3, VirtualMachineSizeTypesStandardE8sV3, VirtualMachineSizeTypesStandardE8V3, VirtualMachineSizeTypesStandardF1, VirtualMachineSizeTypesStandardF16, VirtualMachineSizeTypesStandardF16s, VirtualMachineSizeTypesStandardF16sV2, VirtualMachineSizeTypesStandardF1s, VirtualMachineSizeTypesStandardF2, VirtualMachineSizeTypesStandardF2s, VirtualMachineSizeTypesStandardF2sV2, VirtualMachineSizeTypesStandardF32sV2, VirtualMachineSizeTypesStandardF4, VirtualMachineSizeTypesStandardF4s, VirtualMachineSizeTypesStandardF4sV2, VirtualMachineSizeTypesStandardF64sV2, VirtualMachineSizeTypesStandardF72sV2, VirtualMachineSizeTypesStandardF8, VirtualMachineSizeTypesStandardF8s, VirtualMachineSizeTypesStandardF8sV2, VirtualMachineSizeTypesStandardG1, VirtualMachineSizeTypesStandardG2, VirtualMachineSizeTypesStandardG3, VirtualMachineSizeTypesStandardG4, VirtualMachineSizeTypesStandardG5, VirtualMachineSizeTypesStandardGS1, VirtualMachineSizeTypesStandardGS2, VirtualMachineSizeTypesStandardGS3, VirtualMachineSizeTypesStandardGS4, VirtualMachineSizeTypesStandardGS44, VirtualMachineSizeTypesStandardGS48, VirtualMachineSizeTypesStandardGS5, VirtualMachineSizeTypesStandardGS516, VirtualMachineSizeTypesStandardGS58, VirtualMachineSizeTypesStandardH16, VirtualMachineSizeTypesStandardH16m, VirtualMachineSizeTypesStandardH16mr, VirtualMachineSizeTypesStandardH16r, VirtualMachineSizeTypesStandardH8, VirtualMachineSizeTypesStandardH8m, VirtualMachineSizeTypesStandardL16s, VirtualMachineSizeTypesStandardL32s, VirtualMachineSizeTypesStandardL4s, VirtualMachineSizeTypesStandardL8s, VirtualMachineSizeTypesStandardM12832ms, VirtualMachineSizeTypesStandardM12864ms, VirtualMachineSizeTypesStandardM128ms, VirtualMachineSizeTypesStandardM128s, VirtualMachineSizeTypesStandardM6416ms, VirtualMachineSizeTypesStandardM6432ms, VirtualMachineSizeTypesStandardM64ms, VirtualMachineSizeTypesStandardM64s, VirtualMachineSizeTypesStandardNC12, VirtualMachineSizeTypesStandardNC12sV2, VirtualMachineSizeTypesStandardNC12sV3, VirtualMachineSizeTypesStandardNC24, VirtualMachineSizeTypesStandardNC24r, VirtualMachineSizeTypesStandardNC24rsV2, VirtualMachineSizeTypesStandardNC24rsV3, VirtualMachineSizeTypesStandardNC24sV2, VirtualMachineSizeTypesStandardNC24sV3, VirtualMachineSizeTypesStandardNC6, VirtualMachineSizeTypesStandardNC6sV2, VirtualMachineSizeTypesStandardNC6sV3, VirtualMachineSizeTypesStandardND12s, VirtualMachineSizeTypesStandardND24rs, VirtualMachineSizeTypesStandardND24s, VirtualMachineSizeTypesStandardND6s, VirtualMachineSizeTypesStandardNV12, VirtualMachineSizeTypesStandardNV24, VirtualMachineSizeTypesStandardNV6}
+ return []VirtualMachineSizeTypes{BasicA0, BasicA1, BasicA2, BasicA3, BasicA4, StandardA0, StandardA1, StandardA10, StandardA11, StandardA1V2, StandardA2, StandardA2mV2, StandardA2V2, StandardA3, StandardA4, StandardA4mV2, StandardA4V2, StandardA5, StandardA6, StandardA7, StandardA8, StandardA8mV2, StandardA8V2, StandardA9, StandardB1ms, StandardB1s, StandardB2ms, StandardB2s, StandardB4ms, StandardB8ms, StandardD1, StandardD11, StandardD11V2, StandardD12, StandardD12V2, StandardD13, StandardD13V2, StandardD14, StandardD14V2, StandardD15V2, StandardD16sV3, StandardD16V3, StandardD1V2, StandardD2, StandardD2sV3, StandardD2V2, StandardD2V3, StandardD3, StandardD32sV3, StandardD32V3, StandardD3V2, StandardD4, StandardD4sV3, StandardD4V2, StandardD4V3, StandardD5V2, StandardD64sV3, StandardD64V3, StandardD8sV3, StandardD8V3, StandardDS1, StandardDS11, StandardDS11V2, StandardDS12, StandardDS12V2, StandardDS13, StandardDS132V2, StandardDS134V2, StandardDS13V2, StandardDS14, StandardDS144V2, StandardDS148V2, StandardDS14V2, StandardDS15V2, StandardDS1V2, StandardDS2, StandardDS2V2, StandardDS3, StandardDS3V2, StandardDS4, StandardDS4V2, StandardDS5V2, StandardE16sV3, StandardE16V3, StandardE2sV3, StandardE2V3, StandardE3216V3, StandardE328sV3, StandardE32sV3, StandardE32V3, StandardE4sV3, StandardE4V3, StandardE6416sV3, StandardE6432sV3, StandardE64sV3, StandardE64V3, StandardE8sV3, StandardE8V3, StandardF1, StandardF16, StandardF16s, StandardF16sV2, StandardF1s, StandardF2, StandardF2s, StandardF2sV2, StandardF32sV2, StandardF4, StandardF4s, StandardF4sV2, StandardF64sV2, StandardF72sV2, StandardF8, StandardF8s, StandardF8sV2, StandardG1, StandardG2, StandardG3, StandardG4, StandardG5, StandardGS1, StandardGS2, StandardGS3, StandardGS4, StandardGS44, StandardGS48, StandardGS5, StandardGS516, StandardGS58, StandardH16, StandardH16m, StandardH16mr, StandardH16r, StandardH8, StandardH8m, StandardL16s, StandardL32s, StandardL4s, StandardL8s, StandardM12832ms, StandardM12864ms, StandardM128ms, StandardM128s, StandardM6416ms, StandardM6432ms, StandardM64ms, StandardM64s, StandardNC12, StandardNC12sV2, StandardNC12sV3, StandardNC24, StandardNC24r, StandardNC24rsV2, StandardNC24rsV3, StandardNC24sV2, StandardNC24sV3, StandardNC6, StandardNC6sV2, StandardNC6sV3, StandardND12s, StandardND24rs, StandardND24s, StandardND6s, StandardNV12, StandardNV24, StandardNV6}
}
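
The same rename pattern (dropping the type-name prefix from generated constants) recurs throughout this file. A minimal caller-side sketch of the migration, assuming a hypothetical consumer outside this PR:

    package main

    import (
    	"fmt"

    	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
    )

    func main() {
    	// 2021-07-01: compute.VirtualMachineSizeTypesStandardD2sV3
    	// 2022-03-01: compute.StandardD2sV3
    	size := compute.StandardD2sV3
    	fmt.Println(size) // Standard_D2s_v3
    }
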
// VMDiskTypes enumerates the values for vm disk types.
@@ -1910,17 +2111,17 @@ func PossibleVMDiskTypesValues() []VMDiskTypes {
type VMGuestPatchClassificationLinux string
const (
- // VMGuestPatchClassificationLinuxCritical ...
- VMGuestPatchClassificationLinuxCritical VMGuestPatchClassificationLinux = "Critical"
- // VMGuestPatchClassificationLinuxOther ...
- VMGuestPatchClassificationLinuxOther VMGuestPatchClassificationLinux = "Other"
- // VMGuestPatchClassificationLinuxSecurity ...
- VMGuestPatchClassificationLinuxSecurity VMGuestPatchClassificationLinux = "Security"
+ // Critical ...
+ Critical VMGuestPatchClassificationLinux = "Critical"
+ // Other ...
+ Other VMGuestPatchClassificationLinux = "Other"
+ // Security ...
+ Security VMGuestPatchClassificationLinux = "Security"
)
// PossibleVMGuestPatchClassificationLinuxValues returns an array of possible values for the VMGuestPatchClassificationLinux const type.
func PossibleVMGuestPatchClassificationLinuxValues() []VMGuestPatchClassificationLinux {
- return []VMGuestPatchClassificationLinux{VMGuestPatchClassificationLinuxCritical, VMGuestPatchClassificationLinuxOther, VMGuestPatchClassificationLinuxSecurity}
+ return []VMGuestPatchClassificationLinux{Critical, Other, Security}
}
// VMGuestPatchClassificationWindows enumerates the values for vm guest patch classification windows.
@@ -1973,17 +2174,17 @@ func PossibleVMGuestPatchRebootBehaviorValues() []VMGuestPatchRebootBehavior {
type VMGuestPatchRebootSetting string
const (
- // VMGuestPatchRebootSettingAlways ...
- VMGuestPatchRebootSettingAlways VMGuestPatchRebootSetting = "Always"
- // VMGuestPatchRebootSettingIfRequired ...
- VMGuestPatchRebootSettingIfRequired VMGuestPatchRebootSetting = "IfRequired"
- // VMGuestPatchRebootSettingNever ...
- VMGuestPatchRebootSettingNever VMGuestPatchRebootSetting = "Never"
+ // Always ...
+ Always VMGuestPatchRebootSetting = "Always"
+ // IfRequired ...
+ IfRequired VMGuestPatchRebootSetting = "IfRequired"
+ // Never ...
+ Never VMGuestPatchRebootSetting = "Never"
)
// PossibleVMGuestPatchRebootSettingValues returns an array of possible values for the VMGuestPatchRebootSetting const type.
func PossibleVMGuestPatchRebootSettingValues() []VMGuestPatchRebootSetting {
- return []VMGuestPatchRebootSetting{VMGuestPatchRebootSettingAlways, VMGuestPatchRebootSettingIfRequired, VMGuestPatchRebootSettingNever}
+ return []VMGuestPatchRebootSetting{Always, IfRequired, Never}
}
// VMGuestPatchRebootStatus enumerates the values for vm guest patch reboot status.
@@ -2024,6 +2225,26 @@ func PossibleWindowsPatchAssessmentModeValues() []WindowsPatchAssessmentMode {
return []WindowsPatchAssessmentMode{WindowsPatchAssessmentModeAutomaticByPlatform, WindowsPatchAssessmentModeImageDefault}
}
+// WindowsVMGuestPatchAutomaticByPlatformRebootSetting enumerates the values for windows vm guest patch
+// automatic by platform reboot setting.
+type WindowsVMGuestPatchAutomaticByPlatformRebootSetting string
+
+const (
+ // WindowsVMGuestPatchAutomaticByPlatformRebootSettingAlways ...
+ WindowsVMGuestPatchAutomaticByPlatformRebootSettingAlways WindowsVMGuestPatchAutomaticByPlatformRebootSetting = "Always"
+ // WindowsVMGuestPatchAutomaticByPlatformRebootSettingIfRequired ...
+ WindowsVMGuestPatchAutomaticByPlatformRebootSettingIfRequired WindowsVMGuestPatchAutomaticByPlatformRebootSetting = "IfRequired"
+ // WindowsVMGuestPatchAutomaticByPlatformRebootSettingNever ...
+ WindowsVMGuestPatchAutomaticByPlatformRebootSettingNever WindowsVMGuestPatchAutomaticByPlatformRebootSetting = "Never"
+ // WindowsVMGuestPatchAutomaticByPlatformRebootSettingUnknown ...
+ WindowsVMGuestPatchAutomaticByPlatformRebootSettingUnknown WindowsVMGuestPatchAutomaticByPlatformRebootSetting = "Unknown"
+)
+
+// PossibleWindowsVMGuestPatchAutomaticByPlatformRebootSettingValues returns an array of possible values for the WindowsVMGuestPatchAutomaticByPlatformRebootSetting const type.
+func PossibleWindowsVMGuestPatchAutomaticByPlatformRebootSettingValues() []WindowsVMGuestPatchAutomaticByPlatformRebootSetting {
+ return []WindowsVMGuestPatchAutomaticByPlatformRebootSetting{WindowsVMGuestPatchAutomaticByPlatformRebootSettingAlways, WindowsVMGuestPatchAutomaticByPlatformRebootSettingIfRequired, WindowsVMGuestPatchAutomaticByPlatformRebootSettingNever, WindowsVMGuestPatchAutomaticByPlatformRebootSettingUnknown}
+}
+
// WindowsVMGuestPatchMode enumerates the values for windows vm guest patch mode.
type WindowsVMGuestPatchMode string
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleries.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/galleries.go
similarity index 97%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleries.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/galleries.go
index 9730526f7605..370c530f0d24 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleries.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/galleries.go
@@ -70,7 +70,7 @@ func (client GalleriesClient) CreateOrUpdatePreparer(ctx context.Context, resour
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -151,7 +151,7 @@ func (client GalleriesClient) DeletePreparer(ctx context.Context, resourceGroupN
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -196,7 +196,8 @@ func (client GalleriesClient) DeleteResponder(resp *http.Response) (result autor
// resourceGroupName - the name of the resource group.
// galleryName - the name of the Shared Image Gallery.
// selectParameter - the select expression to apply on the operation.
-func (client GalleriesClient) Get(ctx context.Context, resourceGroupName string, galleryName string, selectParameter SelectPermissions) (result Gallery, err error) {
+// expand - the expand query option to apply on the operation.
+func (client GalleriesClient) Get(ctx context.Context, resourceGroupName string, galleryName string, selectParameter SelectPermissions, expand GalleryExpandParams) (result Gallery, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/GalleriesClient.Get")
defer func() {
@@ -207,7 +208,7 @@ func (client GalleriesClient) Get(ctx context.Context, resourceGroupName string,
tracing.EndSpan(ctx, sc, err)
}()
}
- req, err := client.GetPreparer(ctx, resourceGroupName, galleryName, selectParameter)
+ req, err := client.GetPreparer(ctx, resourceGroupName, galleryName, selectParameter, expand)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "Get", nil, "Failure preparing request")
return
@@ -230,20 +231,23 @@ func (client GalleriesClient) Get(ctx context.Context, resourceGroupName string,
}
// GetPreparer prepares the Get request.
-func (client GalleriesClient) GetPreparer(ctx context.Context, resourceGroupName string, galleryName string, selectParameter SelectPermissions) (*http.Request, error) {
+func (client GalleriesClient) GetPreparer(ctx context.Context, resourceGroupName string, galleryName string, selectParameter SelectPermissions, expand GalleryExpandParams) (*http.Request, error) {
pathParameters := map[string]interface{}{
"galleryName": autorest.Encode("path", galleryName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
if len(string(selectParameter)) > 0 {
queryParameters["$select"] = autorest.Encode("query", selectParameter)
}
+ if len(string(expand)) > 0 {
+ queryParameters["$expand"] = autorest.Encode("query", expand)
+ }
preparer := autorest.CreatePreparer(
autorest.AsGet(),
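
For callers of the vendored client, Get gains a third argument. A hedged sketch (resource names are placeholders, not values from this PR); passing the zero value for expand preserves the old behavior, since the preparer only adds $expand when the string is non-empty:

    // Assumes: import "context" and the vendored 2022-03-01 compute package.
    func getGallery(ctx context.Context, c compute.GalleriesClient) (compute.Gallery, error) {
    	// Empty values omit both query options, matching the pre-change
    	// call shape with one extra zero-valued argument.
    	return c.Get(ctx, "example-rg", "example-gallery",
    		compute.SelectPermissions(""), compute.GalleryExpandParams(""))
    }
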
@@ -316,7 +320,7 @@ func (client GalleriesClient) ListPreparer(ctx context.Context) (*http.Request,
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -432,7 +436,7 @@ func (client GalleriesClient) ListByResourceGroupPreparer(ctx context.Context, r
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -540,7 +544,7 @@ func (client GalleriesClient) UpdatePreparer(ctx context.Context, resourceGroupN
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryapplications.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/galleryapplications.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryapplications.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/galleryapplications.go
index f496086291f8..a5804d0f38ff 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryapplications.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/galleryapplications.go
@@ -75,7 +75,7 @@ func (client GalleryApplicationsClient) CreateOrUpdatePreparer(ctx context.Conte
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -159,7 +159,7 @@ func (client GalleryApplicationsClient) DeletePreparer(ctx context.Context, reso
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -247,7 +247,7 @@ func (client GalleryApplicationsClient) GetPreparer(ctx context.Context, resourc
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -329,7 +329,7 @@ func (client GalleryApplicationsClient) ListByGalleryPreparer(ctx context.Contex
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -441,7 +441,7 @@ func (client GalleryApplicationsClient) UpdatePreparer(ctx context.Context, reso
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryapplicationversions.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/galleryapplicationversions.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryapplicationversions.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/galleryapplicationversions.go
index 947fae104d74..965afa79b20d 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryapplicationversions.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/galleryapplicationversions.go
@@ -94,7 +94,7 @@ func (client GalleryApplicationVersionsClient) CreateOrUpdatePreparer(ctx contex
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -180,7 +180,7 @@ func (client GalleryApplicationVersionsClient) DeletePreparer(ctx context.Contex
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -271,7 +271,7 @@ func (client GalleryApplicationVersionsClient) GetPreparer(ctx context.Context,
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -358,7 +358,7 @@ func (client GalleryApplicationVersionsClient) ListByGalleryApplicationPreparer(
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -472,7 +472,7 @@ func (client GalleryApplicationVersionsClient) UpdatePreparer(ctx context.Contex
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryimages.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/galleryimages.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryimages.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/galleryimages.go
index bf16351b3ec6..2f92ec0e7ca1 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryimages.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/galleryimages.go
@@ -86,7 +86,7 @@ func (client GalleryImagesClient) CreateOrUpdatePreparer(ctx context.Context, re
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -169,7 +169,7 @@ func (client GalleryImagesClient) DeletePreparer(ctx context.Context, resourceGr
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -256,7 +256,7 @@ func (client GalleryImagesClient) GetPreparer(ctx context.Context, resourceGroup
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -337,7 +337,7 @@ func (client GalleryImagesClient) ListByGalleryPreparer(ctx context.Context, res
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -448,7 +448,7 @@ func (client GalleryImagesClient) UpdatePreparer(ctx context.Context, resourceGr
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryimageversions.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/galleryimageversions.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryimageversions.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/galleryimageversions.go
index 0ee7a395b9ea..496a6686d1e5 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryimageversions.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/galleryimageversions.go
@@ -84,7 +84,7 @@ func (client GalleryImageVersionsClient) CreateOrUpdatePreparer(ctx context.Cont
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -169,7 +169,7 @@ func (client GalleryImageVersionsClient) DeletePreparer(ctx context.Context, res
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -259,7 +259,7 @@ func (client GalleryImageVersionsClient) GetPreparer(ctx context.Context, resour
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -346,7 +346,7 @@ func (client GalleryImageVersionsClient) ListByGalleryImagePreparer(ctx context.
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -459,7 +459,7 @@ func (client GalleryImageVersionsClient) UpdatePreparer(ctx context.Context, res
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/gallerysharingprofile.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/gallerysharingprofile.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/gallerysharingprofile.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/gallerysharingprofile.go
index eab53278bb40..2f55daecf584 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/gallerysharingprofile.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/gallerysharingprofile.go
@@ -70,7 +70,7 @@ func (client GallerySharingProfileClient) UpdatePreparer(ctx context.Context, re
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2021-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/images.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/images.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/images.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/images.go
index 66a9bcbd0c44..57908cb564e0 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/images.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/images.go
@@ -69,7 +69,7 @@ func (client ImagesClient) CreateOrUpdatePreparer(ctx context.Context, resourceG
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -150,7 +150,7 @@ func (client ImagesClient) DeletePreparer(ctx context.Context, resourceGroupName
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -236,7 +236,7 @@ func (client ImagesClient) GetPreparer(ctx context.Context, resourceGroupName st
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -316,7 +316,7 @@ func (client ImagesClient) ListPreparer(ctx context.Context) (*http.Request, err
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -432,7 +432,7 @@ func (client ImagesClient) ListByResourceGroupPreparer(ctx context.Context, reso
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -539,7 +539,7 @@ func (client ImagesClient) UpdatePreparer(ctx context.Context, resourceGroupName
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/loganalytics.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/loganalytics.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/loganalytics.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/loganalytics.go
index ec30f9cc8c61..0e82102d8c76 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/loganalytics.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/loganalytics.go
@@ -75,7 +75,7 @@ func (client LogAnalyticsClient) ExportRequestRateByIntervalPreparer(ctx context
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -162,7 +162,7 @@ func (client LogAnalyticsClient) ExportThrottledRequestsPreparer(ctx context.Con
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/models.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/models.go
similarity index 96%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/models.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/models.go
index 9718eb39f588..551a1b034698 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/models.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/models.go
@@ -19,13 +19,15 @@ import (
)
// The package's fully qualified name.
-const fqdn = "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+const fqdn = "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
// AccessURI a disk access SAS uri.
type AccessURI struct {
autorest.Response `json:"-"`
// AccessSAS - READ-ONLY; A SAS uri for accessing a disk.
AccessSAS *string `json:"accessSAS,omitempty"`
+ // SecurityDataAccessSAS - READ-ONLY; A SAS uri for accessing a VM guest state.
+ SecurityDataAccessSAS *string `json:"securityDataAccessSAS,omitempty"`
}
// MarshalJSON is the custom marshaler for AccessURI.
@@ -47,11 +49,11 @@ type AdditionalCapabilities struct {
// Unattend.xml file, which is used by Windows Setup. Contents are defined by setting name, component name,
// and the pass in which the content is applied.
type AdditionalUnattendContent struct {
- // PassName - The pass name. Currently, the only allowable value is OobeSystem. Possible values include: 'PassNamesOobeSystem'
+ // PassName - The pass name. Currently, the only allowable value is OobeSystem. Possible values include: 'OobeSystem'
PassName PassNames `json:"passName,omitempty"`
- // ComponentName - The component name. Currently, the only allowable value is Microsoft-Windows-Shell-Setup. Possible values include: 'ComponentNamesMicrosoftWindowsShellSetup'
+ // ComponentName - The component name. Currently, the only allowable value is Microsoft-Windows-Shell-Setup. Possible values include: 'MicrosoftWindowsShellSetup'
ComponentName ComponentNames `json:"componentName,omitempty"`
- // SettingName - Specifies the name of the setting to which the content applies. Possible values are: FirstLogonCommands and AutoLogon. Possible values include: 'SettingNamesAutoLogon', 'SettingNamesFirstLogonCommands'
+ // SettingName - Specifies the name of the setting to which the content applies. Possible values are: FirstLogonCommands and AutoLogon. Possible values include: 'AutoLogon', 'FirstLogonCommands'
SettingName SettingNames `json:"settingName,omitempty"`
// Content - Specifies the XML formatted content that is added to the unattend.xml file for the specified path and component. The XML must be less than 4KB and must include the root element for the setting or feature that is being inserted.
Content *string `json:"content,omitempty"`
@@ -100,6 +102,8 @@ type AutomaticOSUpgradePolicy struct {
EnableAutomaticOSUpgrade *bool `json:"enableAutomaticOSUpgrade,omitempty"`
// DisableAutomaticRollback - Whether OS image rollback feature should be disabled. Default value is false.
DisableAutomaticRollback *bool `json:"disableAutomaticRollback,omitempty"`
+ // UseRollingUpgradePolicy - Indicates whether rolling upgrade policy should be used during Auto OS Upgrade. Default value is false. Auto OS Upgrade will fallback to the default policy if no policy is defined on the VMSS.
+ UseRollingUpgradePolicy *bool `json:"useRollingUpgradePolicy,omitempty"`
}
// AutomaticOSUpgradeProperties describes automatic OS upgrade properties on the image.
@@ -113,8 +117,10 @@ type AutomaticOSUpgradeProperties struct {
type AutomaticRepairsPolicy struct {
// Enabled - Specifies whether automatic repairs should be enabled on the virtual machine scale set. The default value is false.
Enabled *bool `json:"enabled,omitempty"`
- // GracePeriod - The amount of time for which automatic repairs are suspended due to a state change on VM. The grace time starts after the state change has completed. This helps avoid premature or accidental repairs. The time duration should be specified in ISO 8601 format. The minimum allowed grace period is 30 minutes (PT30M), which is also the default value. The maximum allowed grace period is 90 minutes (PT90M).
+ // GracePeriod - The amount of time for which automatic repairs are suspended due to a state change on VM. The grace time starts after the state change has completed. This helps avoid premature or accidental repairs. The time duration should be specified in ISO 8601 format. The minimum allowed grace period is 10 minutes (PT10M), which is also the default value. The maximum allowed grace period is 90 minutes (PT90M).
GracePeriod *string `json:"gracePeriod,omitempty"`
+ // RepairAction - Type of repair action (replace, restart, reimage) that will be used for repairing unhealthy virtual machines in the scale set. Default value is replace. Possible values include: 'Replace', 'Restart', 'Reimage'
+ RepairAction RepairAction `json:"repairAction,omitempty"`
}
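
The new RepairAction field (together with the PT10M grace-period floor noted above) would be set on a scale set roughly like this; an illustrative sketch only, with the surrounding VMSS model and client call assumed:

    package example

    import (
    	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
    	"github.com/Azure/go-autorest/autorest/to"
    )

    // repairs selects how unhealthy VMSS instances are repaired.
    var repairs = compute.AutomaticRepairsPolicy{
    	Enabled:      to.BoolPtr(true),
    	GracePeriod:  to.StringPtr("PT10M"), // new minimum per this change
    	RepairAction: compute.Replace,       // or compute.Restart / compute.Reimage
    }
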
// AvailabilitySet specifies information about the availability set that the virtual machine should be
@@ -1254,6 +1260,8 @@ type CapacityReservationProperties struct {
ProvisioningState *string `json:"provisioningState,omitempty"`
// InstanceView - READ-ONLY; The Capacity reservation instance view.
InstanceView *CapacityReservationInstanceView `json:"instanceView,omitempty"`
+ // TimeCreated - READ-ONLY; Specifies the time at which the Capacity Reservation resource was created. Minimum api-version: 2022-03-01.
+ TimeCreated *date.Time `json:"timeCreated,omitempty"`
}
// MarshalJSON is the custom marshaler for CapacityReservationProperties.
@@ -1775,7 +1783,7 @@ type CloudServiceProperties struct {
// AllowModelOverride - (Optional) Indicates whether the role sku properties (roleProfile.roles.sku) specified in the model/template should override the role instance count and vm size specified in the .cscfg and .csdef respectively.
// The default value is `false`.
AllowModelOverride *bool `json:"allowModelOverride,omitempty"`
- // UpgradeMode - Possible values include: 'CloudServiceUpgradeModeAuto', 'CloudServiceUpgradeModeManual', 'CloudServiceUpgradeModeSimultaneous'
+ // UpgradeMode - Possible values include: 'Auto', 'Manual', 'Simultaneous'
UpgradeMode CloudServiceUpgradeMode `json:"upgradeMode,omitempty"`
RoleProfile *CloudServiceRoleProfile `json:"roleProfile,omitempty"`
OsProfile *CloudServiceOsProfile `json:"osProfile,omitempty"`
@@ -2776,14 +2784,14 @@ func (cgiVar *CommunityGalleryImage) UnmarshalJSON(body []byte) error {
type CommunityGalleryImageProperties struct {
// OsType - This property allows you to specify the type of the OS that is included in the disk when creating a VM from a managed image. Possible values are: **Windows** **Linux**. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux'
OsType OperatingSystemTypes `json:"osType,omitempty"`
- // OsState - This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'. Possible values include: 'OperatingSystemStateTypesGeneralized', 'OperatingSystemStateTypesSpecialized'
+ // OsState - This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'. Possible values include: 'Generalized', 'Specialized'
OsState OperatingSystemStateTypes `json:"osState,omitempty"`
// EndOfLifeDate - The end of life date of the gallery image definition. This property can be used for decommissioning purposes. This property is updatable.
EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"`
Identifier *GalleryImageIdentifier `json:"identifier,omitempty"`
Recommended *RecommendedMachineConfiguration `json:"recommended,omitempty"`
Disallowed *Disallowed `json:"disallowed,omitempty"`
- // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'HyperVGenerationV1', 'HyperVGenerationV2'
+ // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'V1', 'V2'
HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"`
// Features - A list of gallery image features.
Features *[]GalleryImageFeature `json:"features,omitempty"`
@@ -2884,9 +2892,43 @@ type CommunityGalleryImageVersionProperties struct {
EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"`
}
+// CommunityGalleryInfo information of community gallery if current gallery is shared to community
+type CommunityGalleryInfo struct {
+ // PublisherURI - Community gallery publisher uri
+ PublisherURI *string `json:"publisherUri,omitempty"`
+ // PublisherContact - Community gallery publisher contact email
+ PublisherContact *string `json:"publisherContact,omitempty"`
+ // Eula - Community gallery publisher eula
+ Eula *string `json:"eula,omitempty"`
+ // PublicNamePrefix - Community gallery public name prefix
+ PublicNamePrefix *string `json:"publicNamePrefix,omitempty"`
+ // CommunityGalleryEnabled - READ-ONLY; Contains info about whether community gallery sharing is enabled.
+ CommunityGalleryEnabled *bool `json:"communityGalleryEnabled,omitempty"`
+ // PublicNames - READ-ONLY; Community gallery public name list.
+ PublicNames *[]string `json:"publicNames,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for CommunityGalleryInfo.
+func (cgiVar CommunityGalleryInfo) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if cgiVar.PublisherURI != nil {
+ objectMap["publisherUri"] = cgiVar.PublisherURI
+ }
+ if cgiVar.PublisherContact != nil {
+ objectMap["publisherContact"] = cgiVar.PublisherContact
+ }
+ if cgiVar.Eula != nil {
+ objectMap["eula"] = cgiVar.Eula
+ }
+ if cgiVar.PublicNamePrefix != nil {
+ objectMap["publicNamePrefix"] = cgiVar.PublicNamePrefix
+ }
+ return json.Marshal(objectMap)
+}
+
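
A small sketch of the generated marshaler's behavior: READ-ONLY properties (CommunityGalleryEnabled, PublicNames) are dropped on the wire, so only the writable fields survive serialization. Hypothetical usage, with a placeholder URI:

    package example

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
    	"github.com/Azure/go-autorest/autorest/to"
    )

    func Demo() {
    	info := compute.CommunityGalleryInfo{
    		PublisherURI:            to.StringPtr("https://example.com/gallery"),
    		CommunityGalleryEnabled: to.BoolPtr(true), // READ-ONLY: omitted below
    	}
    	b, _ := json.Marshal(info) // invokes the custom MarshalJSON above
    	fmt.Println(string(b))     // {"publisherUri":"https://example.com/gallery"}
    }
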
// CreationData data used when creating a disk.
type CreationData struct {
- // CreateOption - This enumerates the possible sources of a disk's creation. Possible values include: 'DiskCreateOptionEmpty', 'DiskCreateOptionAttach', 'DiskCreateOptionFromImage', 'DiskCreateOptionImport', 'DiskCreateOptionCopy', 'DiskCreateOptionRestore', 'DiskCreateOptionUpload', 'DiskCreateOptionCopyStart'
+ // CreateOption - This enumerates the possible sources of a disk's creation. Possible values include: 'Empty', 'Attach', 'FromImage', 'Import', 'Copy', 'Restore', 'Upload', 'CopyStart', 'ImportSecure', 'UploadPreparedSecure'
CreateOption DiskCreateOption `json:"createOption,omitempty"`
// StorageAccountID - Required if createOption is Import. The Azure Resource Manager identifier of the storage account containing the blob to import as a disk.
StorageAccountID *string `json:"storageAccountId,omitempty"`
@@ -2904,6 +2946,8 @@ type CreationData struct {
UploadSizeBytes *int64 `json:"uploadSizeBytes,omitempty"`
// LogicalSectorSize - Logical sector size in bytes for Ultra disks. Supported values are 512 and 4096. 4096 is the default.
LogicalSectorSize *int32 `json:"logicalSectorSize,omitempty"`
+ // SecurityDataURI - If createOption is ImportSecure, this is the URI of a blob to be imported into VM guest state.
+ SecurityDataURI *string `json:"securityDataUri,omitempty"`
}
// MarshalJSON is the custom marshaler for CreationData.
@@ -2933,6 +2977,9 @@ func (cd CreationData) MarshalJSON() ([]byte, error) {
if cd.LogicalSectorSize != nil {
objectMap["logicalSectorSize"] = cd.LogicalSectorSize
}
+ if cd.SecurityDataURI != nil {
+ objectMap["securityDataUri"] = cd.SecurityDataURI
+ }
return json.Marshal(objectMap)
}
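
A hedged usage sketch for the new field: when CreateOption is ImportSecure, SecurityDataURI points at the blob holding the VM guest state to import. The resource IDs and blob URLs below are placeholders, not values from this PR:

    cd := compute.CreationData{
    	CreateOption:     compute.ImportSecure,
    	StorageAccountID: to.StringPtr("/subscriptions/<sub>/resourceGroups/example-rg/providers/Microsoft.Storage/storageAccounts/examplesa"),
    	SourceURI:        to.StringPtr("https://examplesa.blob.core.windows.net/vhds/os.vhd"),
    	SecurityDataURI:  to.StringPtr("https://examplesa.blob.core.windows.net/vhds/vmgs.vhd"),
    }
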
@@ -2962,7 +3009,7 @@ type DataDisk struct {
DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"`
// DiskMBpsReadWrite - READ-ONLY; Specifies the bandwidth in MB per second for the managed disk when StorageAccountType is UltraSSD_LRS. Returned only for VirtualMachine ScaleSet VM disks. Can be updated only via updates to the VirtualMachine Scale Set.
DiskMBpsReadWrite *int64 `json:"diskMBpsReadWrite,omitempty"`
- // DetachOption - Specifies the detach behavior to be used while detaching a disk or which is already in the process of detachment from the virtual machine. Supported values: **ForceDetach**. detachOption: **ForceDetach** is applicable only for managed data disks. If a previous detachment attempt of the data disk did not complete due to an unexpected failure from the virtual machine and the disk is still not released then use force-detach as a last resort option to detach the disk forcibly from the VM. All writes might not have been flushed when using this detach behavior. This feature is still in preview mode and is not supported for VirtualMachineScaleSet. To force-detach a data disk update toBeDetached to 'true' along with setting detachOption: 'ForceDetach'. Possible values include: 'DiskDetachOptionTypesForceDetach'
+ // DetachOption - Specifies the detach behavior to be used while detaching a disk or which is already in the process of detachment from the virtual machine. Supported values: **ForceDetach**. detachOption: **ForceDetach** is applicable only for managed data disks. If a previous detachment attempt of the data disk did not complete due to an unexpected failure from the virtual machine and the disk is still not released then use force-detach as a last resort option to detach the disk forcibly from the VM. All writes might not have been flushed when using this detach behavior. This feature is still in preview mode and is not supported for VirtualMachineScaleSet. To force-detach a data disk update toBeDetached to 'true' along with setting detachOption: 'ForceDetach'. Possible values include: 'ForceDetach'
DetachOption DiskDetachOptionTypes `json:"detachOption,omitempty"`
// DeleteOption - Specifies whether data disk should be deleted or detached upon VM deletion. Possible values: **Delete** If this value is used, the data disk is deleted when VM is deleted. **Detach** If this value is used, the data disk is retained after VM is deleted. The default value is set to **detach**. Possible values include: 'DiskDeleteOptionTypesDelete', 'DiskDeleteOptionTypesDetach'
DeleteOption DiskDeleteOptionTypes `json:"deleteOption,omitempty"`
@@ -3450,6 +3497,8 @@ type DedicatedHostGroupProperties struct {
InstanceView *DedicatedHostGroupInstanceView `json:"instanceView,omitempty"`
// SupportAutomaticPlacement - Specifies whether virtual machines or virtual machine scale sets can be placed automatically on the dedicated host group. Automatic placement means resources are allocated on dedicated hosts, that are chosen by Azure, under the dedicated host group. The value is defaulted to 'false' when not provided. Minimum api-version: 2020-06-01.
SupportAutomaticPlacement *bool `json:"supportAutomaticPlacement,omitempty"`
+ // AdditionalCapabilities - Enables or disables a capability on the dedicated host group. Minimum api-version: 2022-03-01.
+ AdditionalCapabilities *DedicatedHostGroupPropertiesAdditionalCapabilities `json:"additionalCapabilities,omitempty"`
}
// MarshalJSON is the custom marshaler for DedicatedHostGroupProperties.
@@ -3461,9 +3510,19 @@ func (dhgp DedicatedHostGroupProperties) MarshalJSON() ([]byte, error) {
if dhgp.SupportAutomaticPlacement != nil {
objectMap["supportAutomaticPlacement"] = dhgp.SupportAutomaticPlacement
}
+ if dhgp.AdditionalCapabilities != nil {
+ objectMap["additionalCapabilities"] = dhgp.AdditionalCapabilities
+ }
return json.Marshal(objectMap)
}
+// DedicatedHostGroupPropertiesAdditionalCapabilities enables or disables a capability on the dedicated
+// host group. Minimum api-version: 2022-03-01.
+type DedicatedHostGroupPropertiesAdditionalCapabilities struct {
+ // UltraSSDEnabled - The flag that enables or disables a capability to have UltraSSD Enabled Virtual Machines on Dedicated Hosts of the Dedicated Host Group. For the Virtual Machines to be UltraSSD Enabled, UltraSSDEnabled flag for the resource needs to be set true as well. The value is defaulted to 'false' when not provided. Please refer to https://docs.microsoft.com/en-us/azure/virtual-machines/disks-enable-ultra-ssd for more details on Ultra SSD feature. NOTE: The ultraSSDEnabled setting can only be enabled for Host Groups that are created as zonal. Minimum api-version: 2022-03-01.
+ UltraSSDEnabled *bool `json:"ultraSSDEnabled,omitempty"`
+}
+
// DedicatedHostGroupUpdate specifies information about the dedicated host group that the dedicated host
// should be assigned to. Only tags may be updated.
type DedicatedHostGroupUpdate struct {
@@ -3755,6 +3814,8 @@ type DedicatedHostProperties struct {
ProvisioningState *string `json:"provisioningState,omitempty"`
// InstanceView - READ-ONLY; The dedicated host instance view.
InstanceView *DedicatedHostInstanceView `json:"instanceView,omitempty"`
+ // TimeCreated - READ-ONLY; Specifies the time at which the Dedicated Host resource was created. Minimum api-version: 2022-03-01.
+ TimeCreated *date.Time `json:"timeCreated,omitempty"`
}
// MarshalJSON is the custom marshaler for DedicatedHostProperties.
@@ -3852,6 +3913,43 @@ func (future *DedicatedHostsDeleteFuture) result(client DedicatedHostsClient) (a
return
}
+// DedicatedHostsRestartFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type DedicatedHostsRestartFuture struct {
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(DedicatedHostsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *DedicatedHostsRestartFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for DedicatedHostsRestartFuture.Result.
+func (future *DedicatedHostsRestartFuture) result(client DedicatedHostsClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.DedicatedHostsRestartFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ ar.Response = future.Response()
+ err = azure.NewAsyncOpIncompleteError("compute.DedicatedHostsRestartFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
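
The new future follows the standard track-1 long-running-operation pattern. A hedged sketch of a caller (assumes the Restart client method added alongside this future type; resource names are placeholders):

    // Assumes: import "context" and the vendored 2022-03-01 compute package.
    func restartHost(ctx context.Context, c compute.DedicatedHostsClient) error {
    	future, err := c.Restart(ctx, "example-rg", "example-hostgroup", "example-host")
    	if err != nil {
    		return err
    	}
    	// Block until the LRO finishes, then read the terminal response
    	// through the future's Result hook wired up in UnmarshalJSON above.
    	if err := future.WaitForCompletionRef(ctx, c.Client); err != nil {
    		return err
    	}
    	_, err = future.Result(c)
    	return err
    }
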
// DedicatedHostsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type DedicatedHostsUpdateFuture struct {
@@ -3951,16 +4049,16 @@ func (dhu *DedicatedHostUpdate) UnmarshalJSON(body []byte) error {
// DiagnosticsProfile specifies the boot diagnostic settings state. Minimum api-version: 2015-06-15.
type DiagnosticsProfile struct {
- // BootDiagnostics - Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status. You can easily view the output of your console log. Azure also enables you to see a screenshot of the VM from the hypervisor.
+ // BootDiagnostics - Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status. **NOTE**: If storageUri is being specified then ensure that the storage account is in the same region and subscription as the VM. You can easily view the output of your console log. Azure also enables you to see a screenshot of the VM from the hypervisor.
BootDiagnostics *BootDiagnostics `json:"bootDiagnostics,omitempty"`
}
// DiffDiskSettings describes the parameters of ephemeral disk settings that can be specified for operating
// system disk. NOTE: The ephemeral disk settings can only be specified for managed disk.
type DiffDiskSettings struct {
- // Option - Specifies the ephemeral disk settings for operating system disk. Possible values include: 'DiffDiskOptionsLocal'
+ // Option - Specifies the ephemeral disk settings for operating system disk. Possible values include: 'Local'
Option DiffDiskOptions `json:"option,omitempty"`
- // Placement - Specifies the ephemeral disk placement for operating system disk. Possible values are: **CacheDisk** **ResourceDisk** Default: **CacheDisk** if one is configured for the VM size otherwise **ResourceDisk** is used. Refer to VM size documentation for Windows VM at https://docs.microsoft.com/azure/virtual-machines/windows/sizes and Linux VM at https://docs.microsoft.com/azure/virtual-machines/linux/sizes to check which VM sizes expose a cache disk. Possible values include: 'DiffDiskPlacementCacheDisk', 'DiffDiskPlacementResourceDisk'
+ // Placement - Specifies the ephemeral disk placement for operating system disk. Possible values are: **CacheDisk** **ResourceDisk** Default: **CacheDisk** if one is configured for the VM size otherwise **ResourceDisk** is used. Refer to VM size documentation for Windows VM at https://docs.microsoft.com/azure/virtual-machines/windows/sizes and Linux VM at https://docs.microsoft.com/azure/virtual-machines/linux/sizes to check which VM sizes expose a cache disk. Possible values include: 'CacheDisk', 'ResourceDisk'
Placement DiffDiskPlacement `json:"placement,omitempty"`
}
@@ -5126,7 +5224,7 @@ func (desu *DiskEncryptionSetUpdate) UnmarshalJSON(body []byte) error {
// DiskEncryptionSetUpdateProperties disk encryption set resource update properties.
type DiskEncryptionSetUpdateProperties struct {
- // EncryptionType - Possible values include: 'DiskEncryptionSetTypeEncryptionAtRestWithCustomerKey', 'DiskEncryptionSetTypeEncryptionAtRestWithPlatformAndCustomerKeys'
+ // EncryptionType - Possible values include: 'EncryptionAtRestWithCustomerKey', 'EncryptionAtRestWithPlatformAndCustomerKeys', 'ConfidentialVMEncryptedWithCustomerKey'
EncryptionType DiskEncryptionSetType `json:"encryptionType,omitempty"`
ActiveKey *KeyForDiskEncryptionSet `json:"activeKey,omitempty"`
// RotationToLatestKeyVersionEnabled - Set this flag to true to enable auto-updating of this disk encryption set to the latest key version.
@@ -5314,7 +5412,7 @@ type DiskProperties struct {
TimeCreated *date.Time `json:"timeCreated,omitempty"`
// OsType - The Operating System type. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux'
OsType OperatingSystemTypes `json:"osType,omitempty"`
- // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'HyperVGenerationV1', 'HyperVGenerationV2'
+ // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'V1', 'V2'
HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"`
// PurchasePlan - Purchase plan information for the image from which the OS disk was created. E.g. - {name: 2019-Datacenter, publisher: MicrosoftWindowsServer, product: WindowsServer}
PurchasePlan *PurchasePlan `json:"purchasePlan,omitempty"`
@@ -5340,7 +5438,7 @@ type DiskProperties struct {
DiskIOPSReadOnly *int64 `json:"diskIOPSReadOnly,omitempty"`
// DiskMBpsReadOnly - The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10.
DiskMBpsReadOnly *int64 `json:"diskMBpsReadOnly,omitempty"`
- // DiskState - The state of the disk. Possible values include: 'DiskStateUnattached', 'DiskStateAttached', 'DiskStateReserved', 'DiskStateFrozen', 'DiskStateActiveSAS', 'DiskStateActiveSASFrozen', 'DiskStateReadyToUpload', 'DiskStateActiveUpload'
+ // DiskState - The state of the disk. Possible values include: 'Unattached', 'Attached', 'Reserved', 'Frozen', 'ActiveSAS', 'ActiveSASFrozen', 'ReadyToUpload', 'ActiveUpload'
DiskState DiskState `json:"diskState,omitempty"`
// Encryption - Encryption property can be used to encrypt data at rest with customer managed keys or platform managed keys.
Encryption *Encryption `json:"encryption,omitempty"`
@@ -5348,7 +5446,7 @@ type DiskProperties struct {
MaxShares *int32 `json:"maxShares,omitempty"`
// ShareInfo - READ-ONLY; Details of the list of all VMs that have the disk attached. maxShares should be set to a value greater than one for disks to allow attaching them to multiple VMs.
ShareInfo *[]ShareInfoElement `json:"shareInfo,omitempty"`
- // NetworkAccessPolicy - Possible values include: 'NetworkAccessPolicyAllowAll', 'NetworkAccessPolicyAllowPrivate', 'NetworkAccessPolicyDenyAll'
+ // NetworkAccessPolicy - Possible values include: 'AllowAll', 'AllowPrivate', 'DenyAll'
NetworkAccessPolicy NetworkAccessPolicy `json:"networkAccessPolicy,omitempty"`
// DiskAccessID - ARM id of the DiskAccess resource for using private endpoints on disks.
DiskAccessID *string `json:"diskAccessId,omitempty"`
@@ -5364,8 +5462,10 @@ type DiskProperties struct {
SecurityProfile *DiskSecurityProfile `json:"securityProfile,omitempty"`
// CompletionPercent - Percentage complete for the background copy when a resource is created via the CopyStart operation.
CompletionPercent *float64 `json:"completionPercent,omitempty"`
- // PublicNetworkAccess - Possible values include: 'PublicNetworkAccessEnabled', 'PublicNetworkAccessDisabled'
+ // PublicNetworkAccess - Possible values include: 'Enabled', 'Disabled'
PublicNetworkAccess PublicNetworkAccess `json:"publicNetworkAccess,omitempty"`
+ // DataAccessAuthMode - Possible values include: 'DataAccessAuthModeAzureActiveDirectory', 'DataAccessAuthModeNone'
+ DataAccessAuthMode DataAccessAuthMode `json:"dataAccessAuthMode,omitempty"`
}
// MarshalJSON is the custom marshaler for DiskProperties.
@@ -5437,6 +5537,9 @@ func (dp DiskProperties) MarshalJSON() ([]byte, error) {
if dp.PublicNetworkAccess != "" {
objectMap["publicNetworkAccess"] = dp.PublicNetworkAccess
}
+ if dp.DataAccessAuthMode != "" {
+ objectMap["dataAccessAuthMode"] = dp.DataAccessAuthMode
+ }
return json.Marshal(objectMap)
}
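
A short, hedged sketch of setting the new field on a disk model (the surrounding create/update call is assumed; constant names follow the comment above):

    disk := compute.Disk{
    	DiskProperties: &compute.DiskProperties{
    		// Require Azure AD authorization for export/upload SAS on this disk.
    		DataAccessAuthMode: compute.DataAccessAuthModeAzureActiveDirectory,
    	},
    }
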
@@ -5555,6 +5658,14 @@ func (future *DiskRestorePointGrantAccessFuture) result(client DiskRestorePointC
return
}
+// DiskRestorePointInstanceView the instance view of a disk restore point.
+type DiskRestorePointInstanceView struct {
+ // ID - Disk restore point Id.
+ ID *string `json:"id,omitempty"`
+ // ReplicationStatus - The disk restore point replication status information.
+ ReplicationStatus *DiskRestorePointReplicationStatus `json:"replicationStatus,omitempty"`
+}
+
// DiskRestorePointList the List Disk Restore Points operation response.
type DiskRestorePointList struct {
autorest.Response `json:"-"`
@@ -5718,15 +5829,15 @@ func NewDiskRestorePointListPage(cur DiskRestorePointList, getNextPage func(cont
type DiskRestorePointProperties struct {
// TimeCreated - READ-ONLY; The timestamp of restorePoint creation
TimeCreated *date.Time `json:"timeCreated,omitempty"`
- // SourceResourceID - READ-ONLY; arm id of source disk
+ // SourceResourceID - READ-ONLY; arm id of source disk or source disk restore point.
SourceResourceID *string `json:"sourceResourceId,omitempty"`
// OsType - READ-ONLY; The Operating System type. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux'
OsType OperatingSystemTypes `json:"osType,omitempty"`
- // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'HyperVGenerationV1', 'HyperVGenerationV2'
+ // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'V1', 'V2'
HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"`
// PurchasePlan - Purchase plan information for the image from which the OS disk was created.
PurchasePlan *PurchasePlan `json:"purchasePlan,omitempty"`
- // SupportedCapabilities - List of supported capabilities (like accelerated networking) for the image from which the OS disk was created.
+ // SupportedCapabilities - List of supported capabilities for the image from which the OS disk was created.
SupportedCapabilities *SupportedCapabilities `json:"supportedCapabilities,omitempty"`
// FamilyID - READ-ONLY; id of the backing snapshot's MIS family
FamilyID *string `json:"familyId,omitempty"`
@@ -5736,14 +5847,18 @@ type DiskRestorePointProperties struct {
Encryption *Encryption `json:"encryption,omitempty"`
// SupportsHibernation - Indicates the OS on a disk supports hibernation.
SupportsHibernation *bool `json:"supportsHibernation,omitempty"`
- // NetworkAccessPolicy - Possible values include: 'NetworkAccessPolicyAllowAll', 'NetworkAccessPolicyAllowPrivate', 'NetworkAccessPolicyDenyAll'
+ // NetworkAccessPolicy - Possible values include: 'AllowAll', 'AllowPrivate', 'DenyAll'
NetworkAccessPolicy NetworkAccessPolicy `json:"networkAccessPolicy,omitempty"`
- // PublicNetworkAccess - Possible values include: 'PublicNetworkAccessEnabled', 'PublicNetworkAccessDisabled'
+ // PublicNetworkAccess - Possible values include: 'Enabled', 'Disabled'
PublicNetworkAccess PublicNetworkAccess `json:"publicNetworkAccess,omitempty"`
// DiskAccessID - ARM id of the DiskAccess resource for using private endpoints on disks.
DiskAccessID *string `json:"diskAccessId,omitempty"`
- // CompletionPercent - Percentage complete for the background copy when a resource is created via the CopyStart operation.
+ // CompletionPercent - Percentage complete for the background copy of disk restore point when source resource is from a different region.
CompletionPercent *float64 `json:"completionPercent,omitempty"`
+ // ReplicationState - READ-ONLY; Replication state of disk restore point when source resource is from a different region.
+ ReplicationState *string `json:"replicationState,omitempty"`
+ // SourceResourceLocation - READ-ONLY; Location of source disk or source disk restore point when source resource is from a different region.
+ SourceResourceLocation *string `json:"sourceResourceLocation,omitempty"`
}
// MarshalJSON is the custom marshaler for DiskRestorePointProperties.
@@ -5776,6 +5891,14 @@ func (drpp DiskRestorePointProperties) MarshalJSON() ([]byte, error) {
return json.Marshal(objectMap)
}
+// DiskRestorePointReplicationStatus the instance view of a disk restore point.
+type DiskRestorePointReplicationStatus struct {
+ // Status - The resource status information.
+ Status *InstanceViewStatus `json:"status,omitempty"`
+ // CompletionPercent - Replication completion percentage.
+ CompletionPercent *int32 `json:"completionPercent,omitempty"`
+}
+
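// ---- Editor's sketch (illustrative only; not part of the vendored diff) ----
// Reading the new cross-region replication status types added above. Assumes
// the package/import header from the first sketch; the instance view would
// normally come back from a disk restore point GET call.
func printDiskRestorePointReplication(iv compute.DiskRestorePointInstanceView) {
	if iv.ID != nil {
		fmt.Println("disk restore point:", *iv.ID)
	}
	rs := iv.ReplicationStatus
	if rs == nil {
		return
	}
	if rs.CompletionPercent != nil {
		fmt.Printf("replication %d%% complete\n", *rs.CompletionPercent)
	}
	if rs.Status != nil && rs.Status.DisplayStatus != nil {
		fmt.Println("status:", *rs.Status.DisplayStatus)
	}
}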
// DiskRestorePointRevokeAccessFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type DiskRestorePointRevokeAccessFuture struct {
@@ -5894,8 +6017,10 @@ func (future *DisksDeleteFuture) result(client DisksClient) (ar autorest.Respons
// DiskSecurityProfile contains the security related information for the resource.
type DiskSecurityProfile struct {
- // SecurityType - Possible values include: 'DiskSecurityTypesTrustedLaunch'
+ // SecurityType - Possible values include: 'TrustedLaunch', 'ConfidentialVMVMGuestStateOnlyEncryptedWithPlatformKey', 'ConfidentialVMDiskEncryptedWithPlatformKey', 'ConfidentialVMDiskEncryptedWithCustomerKey'
SecurityType DiskSecurityTypes `json:"securityType,omitempty"`
+ // SecureVMDiskEncryptionSetID - ResourceId of the disk encryption set associated to Confidential VM supported disk encrypted with customer managed key
+ SecureVMDiskEncryptionSetID *string `json:"secureVMDiskEncryptionSetId,omitempty"`
}
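// ---- Editor's sketch (illustrative only; not part of the vendored diff) ----
// Pairing one of the new Confidential VM security types with the new
// SecureVMDiskEncryptionSetID field. The constant name follows the value list
// in the doc comment above; the disk encryption set ID is a placeholder.
var confidentialDiskProfile = compute.DiskSecurityProfile{
	SecurityType: compute.ConfidentialVMDiskEncryptedWithCustomerKey,
	SecureVMDiskEncryptionSetID: to.StringPtr(
		"/subscriptions/{subId}/resourceGroups/{rg}/providers/Microsoft.Compute/diskEncryptionSets/{desName}"),
}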
// DisksGrantAccessFuture an abstraction for monitoring and retrieving the results of a long-running
@@ -5944,7 +6069,7 @@ func (future *DisksGrantAccessFuture) result(client DisksClient) (au AccessURI,
// DiskSku the disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, UltraSSD_LRS,
// Premium_ZRS, or StandardSSD_ZRS.
type DiskSku struct {
- // Name - The sku name. Possible values include: 'DiskStorageAccountTypesStandardLRS', 'DiskStorageAccountTypesPremiumLRS', 'DiskStorageAccountTypesStandardSSDLRS', 'DiskStorageAccountTypesUltraSSDLRS', 'DiskStorageAccountTypesPremiumZRS', 'DiskStorageAccountTypesStandardSSDZRS'
+ // Name - The sku name. Possible values include: 'StandardLRS', 'PremiumLRS', 'StandardSSDLRS', 'UltraSSDLRS', 'PremiumZRS', 'StandardSSDZRS'
Name DiskStorageAccountTypes `json:"name,omitempty"`
// Tier - READ-ONLY; The sku tier.
Tier *string `json:"tier,omitempty"`
@@ -6123,7 +6248,7 @@ type DiskUpdateProperties struct {
MaxShares *int32 `json:"maxShares,omitempty"`
// Encryption - Encryption property can be used to encrypt data at rest with customer managed keys or platform managed keys.
Encryption *Encryption `json:"encryption,omitempty"`
- // NetworkAccessPolicy - Possible values include: 'NetworkAccessPolicyAllowAll', 'NetworkAccessPolicyAllowPrivate', 'NetworkAccessPolicyDenyAll'
+ // NetworkAccessPolicy - Possible values include: 'AllowAll', 'AllowPrivate', 'DenyAll'
NetworkAccessPolicy NetworkAccessPolicy `json:"networkAccessPolicy,omitempty"`
// DiskAccessID - ARM id of the DiskAccess resource for using private endpoints on disks.
DiskAccessID *string `json:"diskAccessId,omitempty"`
@@ -6133,14 +6258,16 @@ type DiskUpdateProperties struct {
BurstingEnabled *bool `json:"burstingEnabled,omitempty"`
// PurchasePlan - Purchase plan information to be added on the OS disk
PurchasePlan *PurchasePlan `json:"purchasePlan,omitempty"`
- // SupportedCapabilities - List of supported capabilities (like accelerated networking) to be added on the OS disk.
+ // SupportedCapabilities - List of supported capabilities to be added on the OS disk.
SupportedCapabilities *SupportedCapabilities `json:"supportedCapabilities,omitempty"`
// PropertyUpdatesInProgress - READ-ONLY; Properties of the disk for which update is pending.
PropertyUpdatesInProgress *PropertyUpdatesInProgress `json:"propertyUpdatesInProgress,omitempty"`
// SupportsHibernation - Indicates the OS on a disk supports hibernation.
SupportsHibernation *bool `json:"supportsHibernation,omitempty"`
- // PublicNetworkAccess - Possible values include: 'PublicNetworkAccessEnabled', 'PublicNetworkAccessDisabled'
+ // PublicNetworkAccess - Possible values include: 'Enabled', 'Disabled'
PublicNetworkAccess PublicNetworkAccess `json:"publicNetworkAccess,omitempty"`
+ // DataAccessAuthMode - Possible values include: 'DataAccessAuthModeAzureActiveDirectory', 'DataAccessAuthModeNone'
+ DataAccessAuthMode DataAccessAuthMode `json:"dataAccessAuthMode,omitempty"`
}
// MarshalJSON is the custom marshaler for DiskUpdateProperties.
@@ -6197,6 +6324,9 @@ func (dup DiskUpdateProperties) MarshalJSON() ([]byte, error) {
if dup.PublicNetworkAccess != "" {
objectMap["publicNetworkAccess"] = dup.PublicNetworkAccess
}
+ if dup.DataAccessAuthMode != "" {
+ objectMap["dataAccessAuthMode"] = dup.DataAccessAuthMode
+ }
return json.Marshal(objectMap)
}
@@ -6238,7 +6368,7 @@ func (esi EncryptionSetIdentity) MarshalJSON() ([]byte, error) {
// EncryptionSetProperties ...
type EncryptionSetProperties struct {
- // EncryptionType - Possible values include: 'DiskEncryptionSetTypeEncryptionAtRestWithCustomerKey', 'DiskEncryptionSetTypeEncryptionAtRestWithPlatformAndCustomerKeys'
+ // EncryptionType - Possible values include: 'EncryptionAtRestWithCustomerKey', 'EncryptionAtRestWithPlatformAndCustomerKeys', 'ConfidentialVMEncryptedWithCustomerKey'
EncryptionType DiskEncryptionSetType `json:"encryptionType,omitempty"`
// ActiveKey - The key vault key which is currently used by this disk encryption set.
ActiveKey *KeyForDiskEncryptionSet `json:"activeKey,omitempty"`
@@ -7307,8 +7437,10 @@ type GalleryApplicationVersionPublishingProfile struct {
EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"`
// StorageAccountType - Specifies the storage account type to be used to store the image. This property is not updatable. Possible values include: 'StorageAccountTypeStandardLRS', 'StorageAccountTypeStandardZRS', 'StorageAccountTypePremiumLRS'
StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"`
- // ReplicationMode - Optional parameter which specifies the mode to be used for replication. This property is not updatable. Possible values include: 'ReplicationModeFull', 'ReplicationModeShallow'
+ // ReplicationMode - Optional parameter which specifies the mode to be used for replication. This property is not updatable. Possible values include: 'Full', 'Shallow'
ReplicationMode ReplicationMode `json:"replicationMode,omitempty"`
+ // TargetExtendedLocations - The target extended locations where the Image Version is going to be replicated to. This property is updatable.
+ TargetExtendedLocations *[]GalleryTargetExtendedLocation `json:"targetExtendedLocations,omitempty"`
}
// MarshalJSON is the custom marshaler for GalleryApplicationVersionPublishingProfile.
@@ -7341,6 +7473,9 @@ func (gavpp GalleryApplicationVersionPublishingProfile) MarshalJSON() ([]byte, e
if gavpp.ReplicationMode != "" {
objectMap["replicationMode"] = gavpp.ReplicationMode
}
+ if gavpp.TargetExtendedLocations != nil {
+ objectMap["targetExtendedLocations"] = gavpp.TargetExtendedLocations
+ }
return json.Marshal(objectMap)
}
@@ -7567,8 +7702,10 @@ type GalleryArtifactPublishingProfileBase struct {
EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"`
// StorageAccountType - Specifies the storage account type to be used to store the image. This property is not updatable. Possible values include: 'StorageAccountTypeStandardLRS', 'StorageAccountTypeStandardZRS', 'StorageAccountTypePremiumLRS'
StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"`
- // ReplicationMode - Optional parameter which specifies the mode to be used for replication. This property is not updatable. Possible values include: 'ReplicationModeFull', 'ReplicationModeShallow'
+ // ReplicationMode - Optional parameter which specifies the mode to be used for replication. This property is not updatable. Possible values include: 'Full', 'Shallow'
ReplicationMode ReplicationMode `json:"replicationMode,omitempty"`
+ // TargetExtendedLocations - The target extended locations where the Image Version is going to be replicated to. This property is updatable.
+ TargetExtendedLocations *[]GalleryTargetExtendedLocation `json:"targetExtendedLocations,omitempty"`
}
// MarshalJSON is the custom marshaler for GalleryArtifactPublishingProfileBase.
@@ -7592,6 +7729,9 @@ func (gappb GalleryArtifactPublishingProfileBase) MarshalJSON() ([]byte, error)
if gappb.ReplicationMode != "" {
objectMap["replicationMode"] = gappb.ReplicationMode
}
+ if gappb.TargetExtendedLocations != nil {
+ objectMap["targetExtendedLocations"] = gappb.TargetExtendedLocations
+ }
return json.Marshal(objectMap)
}
@@ -7655,6 +7795,13 @@ func (gdi GalleryDiskImage) MarshalJSON() ([]byte, error) {
return json.Marshal(objectMap)
}
+// GalleryExtendedLocation the name of the extended location.
+type GalleryExtendedLocation struct {
+ Name *string `json:"name,omitempty"`
+ // Type - Possible values include: 'GalleryExtendedLocationTypeEdgeZone', 'GalleryExtendedLocationTypeUnknown'
+ Type GalleryExtendedLocationType `json:"type,omitempty"`
+}
+
// GalleryIdentifier describes the gallery unique name.
type GalleryIdentifier struct {
// UniqueName - READ-ONLY; The unique name of the Shared Image Gallery. This name is generated automatically by Azure.
@@ -7956,9 +8103,9 @@ type GalleryImageProperties struct {
ReleaseNoteURI *string `json:"releaseNoteUri,omitempty"`
// OsType - This property allows you to specify the type of the OS that is included in the disk when creating a VM from a managed image.
Possible values are:
**Windows**
**Linux**. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux'
OsType OperatingSystemTypes `json:"osType,omitempty"`
- // OsState - This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'. Possible values include: 'OperatingSystemStateTypesGeneralized', 'OperatingSystemStateTypesSpecialized'
+ // OsState - This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'. Possible values include: 'Generalized', 'Specialized'
OsState OperatingSystemStateTypes `json:"osState,omitempty"`
- // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'HyperVGenerationV1', 'HyperVGenerationV2'
+ // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'V1', 'V2'
HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"`
// EndOfLifeDate - The end of life date of the gallery image definition. This property can be used for decommissioning purposes. This property is updatable.
EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"`
@@ -7970,6 +8117,8 @@ type GalleryImageProperties struct {
ProvisioningState ProvisioningState2 `json:"provisioningState,omitempty"`
// Features - A list of gallery image features.
Features *[]GalleryImageFeature `json:"features,omitempty"`
+ // Architecture - The architecture of the image. Applicable to OS disks only. Possible values include: 'X64', 'Arm64'
+ Architecture Architecture `json:"architecture,omitempty"`
}
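// ---- Editor's sketch (illustrative only; not part of the vendored diff) ----
// Tagging a gallery image definition with the new Architecture field; the
// unprefixed constant name (Arm64) is assumed from the value list in the doc
// comment above, and the marshaler emits it only when non-empty.
var arm64ImageProps = compute.GalleryImageProperties{
	OsType:       compute.OperatingSystemTypesLinux,
	OsState:      compute.Generalized,
	Architecture: compute.Arm64,
}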
// MarshalJSON is the custom marshaler for GalleryImageProperties.
@@ -8014,6 +8163,9 @@ func (gip GalleryImageProperties) MarshalJSON() ([]byte, error) {
if gip.Features != nil {
objectMap["features"] = gip.Features
}
+ if gip.Architecture != "" {
+ objectMap["architecture"] = gip.Architecture
+ }
return json.Marshal(objectMap)
}
@@ -8521,8 +8673,10 @@ type GalleryImageVersionPublishingProfile struct {
EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"`
// StorageAccountType - Specifies the storage account type to be used to store the image. This property is not updatable. Possible values include: 'StorageAccountTypeStandardLRS', 'StorageAccountTypeStandardZRS', 'StorageAccountTypePremiumLRS'
StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"`
- // ReplicationMode - Optional parameter which specifies the mode to be used for replication. This property is not updatable. Possible values include: 'ReplicationModeFull', 'ReplicationModeShallow'
+ // ReplicationMode - Optional parameter which specifies the mode to be used for replication. This property is not updatable. Possible values include: 'Full', 'Shallow'
ReplicationMode ReplicationMode `json:"replicationMode,omitempty"`
+ // TargetExtendedLocations - The target extended locations where the Image Version is going to be replicated to. This property is updatable.
+ TargetExtendedLocations *[]GalleryTargetExtendedLocation `json:"targetExtendedLocations,omitempty"`
}
// MarshalJSON is the custom marshaler for GalleryImageVersionPublishingProfile.
@@ -8546,6 +8700,9 @@ func (givpp GalleryImageVersionPublishingProfile) MarshalJSON() ([]byte, error)
if givpp.ReplicationMode != "" {
objectMap["replicationMode"] = givpp.ReplicationMode
}
+ if givpp.TargetExtendedLocations != nil {
+ objectMap["targetExtendedLocations"] = givpp.TargetExtendedLocations
+ }
return json.Marshal(objectMap)
}
@@ -8954,6 +9111,8 @@ type GalleryProperties struct {
ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
SharingProfile *SharingProfile `json:"sharingProfile,omitempty"`
SoftDeletePolicy *SoftDeletePolicy `json:"softDeletePolicy,omitempty"`
+ // SharingStatus - READ-ONLY
+ SharingStatus *SharingStatus `json:"sharingStatus,omitempty"`
}
// MarshalJSON is the custom marshaler for GalleryProperties.
@@ -9017,6 +9176,18 @@ func (future *GallerySharingProfileUpdateFuture) result(client GallerySharingPro
return
}
+// GalleryTargetExtendedLocation ...
+type GalleryTargetExtendedLocation struct {
+ // Name - The name of the region.
+ Name *string `json:"name,omitempty"`
+ ExtendedLocation *GalleryExtendedLocation `json:"extendedLocation,omitempty"`
+ // ExtendedLocationReplicaCount - The number of replicas of the Image Version to be created per extended location. This property is updatable.
+ ExtendedLocationReplicaCount *int32 `json:"extendedLocationReplicaCount,omitempty"`
+ // StorageAccountType - Specifies the storage account type to be used to store the image. This property is not updatable. Possible values include: 'StorageAccountTypeStandardLRS', 'StorageAccountTypeStandardZRS', 'StorageAccountTypePremiumLRS'
+ StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"`
+ Encryption *EncryptionImages `json:"encryption,omitempty"`
+}
+
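// ---- Editor's sketch (illustrative only; not part of the vendored diff) ----
// Wiring a GalleryTargetExtendedLocation into the new TargetExtendedLocations
// field shared by the publishing profiles above. The region, edge zone name,
// and replica count are placeholder values.
var edgeZoneTarget = compute.GalleryTargetExtendedLocation{
	Name: to.StringPtr("eastus"),
	ExtendedLocation: &compute.GalleryExtendedLocation{
		Name: to.StringPtr("{edgeZoneName}"),
		Type: compute.GalleryExtendedLocationTypeEdgeZone,
	},
	ExtendedLocationReplicaCount: to.Int32Ptr(1),
	StorageAccountType:           compute.StorageAccountTypeStandardLRS,
}

var edgeZonePublishingProfile = compute.GalleryImageVersionPublishingProfile{
	TargetExtendedLocations: &[]compute.GalleryTargetExtendedLocation{edgeZoneTarget},
}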
// GalleryUpdate specifies information about the Shared Image Gallery that you want to update.
type GalleryUpdate struct {
*GalleryProperties `json:"properties,omitempty"`
@@ -9104,15 +9275,17 @@ func (gu *GalleryUpdate) UnmarshalJSON(body []byte) error {
// GrantAccessData data used for requesting a SAS.
type GrantAccessData struct {
- // Access - Possible values include: 'AccessLevelNone', 'AccessLevelRead', 'AccessLevelWrite'
+ // Access - Possible values include: 'None', 'Read', 'Write'
Access AccessLevel `json:"access,omitempty"`
// DurationInSeconds - Time duration in seconds until the SAS access expires.
DurationInSeconds *int32 `json:"durationInSeconds,omitempty"`
+ // GetSecureVMGuestStateSAS - Set this flag to true to get additional SAS for VM guest state
+ GetSecureVMGuestStateSAS *bool `json:"getSecureVMGuestStateSAS,omitempty"`
}
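// ---- Editor's sketch (illustrative only; not part of the vendored diff) ----
// Requesting a SAS that also covers VM guest state via the new
// GetSecureVMGuestStateSAS flag; the unprefixed AccessLevel constant (Read)
// follows the value list in the doc comment above.
var guestStateSASRequest = compute.GrantAccessData{
	Access:                   compute.Read,
	DurationInSeconds:        to.Int32Ptr(3600),
	GetSecureVMGuestStateSAS: to.BoolPtr(true),
}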
// HardwareProfile specifies the hardware settings for the virtual machine.
type HardwareProfile struct {
- // VMSize - Specifies the size of the virtual machine.
The enum data type is currently deprecated and will be removed by December 23rd 2023.
Recommended way to get the list of available sizes is using these APIs:
[List all available virtual machine sizes in an availability set](https://docs.microsoft.com/rest/api/compute/availabilitysets/listavailablesizes)
[List all available virtual machine sizes in a region]( https://docs.microsoft.com/rest/api/compute/resourceskus/list)
[List all available virtual machine sizes for resizing](https://docs.microsoft.com/rest/api/compute/virtualmachines/listavailablesizes). For more information about virtual machine sizes, see [Sizes for virtual machines](https://docs.microsoft.com/azure/virtual-machines/sizes).
The available VM sizes depend on region and availability set. Possible values include: 'VirtualMachineSizeTypesBasicA0', 'VirtualMachineSizeTypesBasicA1', 'VirtualMachineSizeTypesBasicA2', 'VirtualMachineSizeTypesBasicA3', 'VirtualMachineSizeTypesBasicA4', 'VirtualMachineSizeTypesStandardA0', 'VirtualMachineSizeTypesStandardA1', 'VirtualMachineSizeTypesStandardA2', 'VirtualMachineSizeTypesStandardA3', 'VirtualMachineSizeTypesStandardA4', 'VirtualMachineSizeTypesStandardA5', 'VirtualMachineSizeTypesStandardA6', 'VirtualMachineSizeTypesStandardA7', 'VirtualMachineSizeTypesStandardA8', 'VirtualMachineSizeTypesStandardA9', 'VirtualMachineSizeTypesStandardA10', 'VirtualMachineSizeTypesStandardA11', 'VirtualMachineSizeTypesStandardA1V2', 'VirtualMachineSizeTypesStandardA2V2', 'VirtualMachineSizeTypesStandardA4V2', 'VirtualMachineSizeTypesStandardA8V2', 'VirtualMachineSizeTypesStandardA2mV2', 'VirtualMachineSizeTypesStandardA4mV2', 'VirtualMachineSizeTypesStandardA8mV2', 'VirtualMachineSizeTypesStandardB1s', 'VirtualMachineSizeTypesStandardB1ms', 'VirtualMachineSizeTypesStandardB2s', 'VirtualMachineSizeTypesStandardB2ms', 'VirtualMachineSizeTypesStandardB4ms', 'VirtualMachineSizeTypesStandardB8ms', 'VirtualMachineSizeTypesStandardD1', 'VirtualMachineSizeTypesStandardD2', 'VirtualMachineSizeTypesStandardD3', 'VirtualMachineSizeTypesStandardD4', 'VirtualMachineSizeTypesStandardD11', 'VirtualMachineSizeTypesStandardD12', 'VirtualMachineSizeTypesStandardD13', 'VirtualMachineSizeTypesStandardD14', 'VirtualMachineSizeTypesStandardD1V2', 'VirtualMachineSizeTypesStandardD2V2', 'VirtualMachineSizeTypesStandardD3V2', 'VirtualMachineSizeTypesStandardD4V2', 'VirtualMachineSizeTypesStandardD5V2', 'VirtualMachineSizeTypesStandardD2V3', 'VirtualMachineSizeTypesStandardD4V3', 'VirtualMachineSizeTypesStandardD8V3', 'VirtualMachineSizeTypesStandardD16V3', 'VirtualMachineSizeTypesStandardD32V3', 'VirtualMachineSizeTypesStandardD64V3', 'VirtualMachineSizeTypesStandardD2sV3', 'VirtualMachineSizeTypesStandardD4sV3', 'VirtualMachineSizeTypesStandardD8sV3', 'VirtualMachineSizeTypesStandardD16sV3', 'VirtualMachineSizeTypesStandardD32sV3', 'VirtualMachineSizeTypesStandardD64sV3', 'VirtualMachineSizeTypesStandardD11V2', 'VirtualMachineSizeTypesStandardD12V2', 'VirtualMachineSizeTypesStandardD13V2', 'VirtualMachineSizeTypesStandardD14V2', 'VirtualMachineSizeTypesStandardD15V2', 'VirtualMachineSizeTypesStandardDS1', 'VirtualMachineSizeTypesStandardDS2', 'VirtualMachineSizeTypesStandardDS3', 'VirtualMachineSizeTypesStandardDS4', 'VirtualMachineSizeTypesStandardDS11', 'VirtualMachineSizeTypesStandardDS12', 'VirtualMachineSizeTypesStandardDS13', 'VirtualMachineSizeTypesStandardDS14', 'VirtualMachineSizeTypesStandardDS1V2', 'VirtualMachineSizeTypesStandardDS2V2', 'VirtualMachineSizeTypesStandardDS3V2', 'VirtualMachineSizeTypesStandardDS4V2', 'VirtualMachineSizeTypesStandardDS5V2', 'VirtualMachineSizeTypesStandardDS11V2', 'VirtualMachineSizeTypesStandardDS12V2', 'VirtualMachineSizeTypesStandardDS13V2', 'VirtualMachineSizeTypesStandardDS14V2', 'VirtualMachineSizeTypesStandardDS15V2', 'VirtualMachineSizeTypesStandardDS134V2', 'VirtualMachineSizeTypesStandardDS132V2', 'VirtualMachineSizeTypesStandardDS148V2', 'VirtualMachineSizeTypesStandardDS144V2', 'VirtualMachineSizeTypesStandardE2V3', 'VirtualMachineSizeTypesStandardE4V3', 'VirtualMachineSizeTypesStandardE8V3', 'VirtualMachineSizeTypesStandardE16V3', 'VirtualMachineSizeTypesStandardE32V3', 'VirtualMachineSizeTypesStandardE64V3', 'VirtualMachineSizeTypesStandardE2sV3', 'VirtualMachineSizeTypesStandardE4sV3', 'VirtualMachineSizeTypesStandardE8sV3', 'VirtualMachineSizeTypesStandardE16sV3', 'VirtualMachineSizeTypesStandardE32sV3', 'VirtualMachineSizeTypesStandardE64sV3', 'VirtualMachineSizeTypesStandardE3216V3', 'VirtualMachineSizeTypesStandardE328sV3', 'VirtualMachineSizeTypesStandardE6432sV3', 'VirtualMachineSizeTypesStandardE6416sV3', 'VirtualMachineSizeTypesStandardF1', 'VirtualMachineSizeTypesStandardF2', 'VirtualMachineSizeTypesStandardF4', 'VirtualMachineSizeTypesStandardF8', 'VirtualMachineSizeTypesStandardF16', 'VirtualMachineSizeTypesStandardF1s', 'VirtualMachineSizeTypesStandardF2s', 'VirtualMachineSizeTypesStandardF4s', 'VirtualMachineSizeTypesStandardF8s', 'VirtualMachineSizeTypesStandardF16s', 'VirtualMachineSizeTypesStandardF2sV2', 'VirtualMachineSizeTypesStandardF4sV2', 'VirtualMachineSizeTypesStandardF8sV2', 'VirtualMachineSizeTypesStandardF16sV2', 'VirtualMachineSizeTypesStandardF32sV2', 'VirtualMachineSizeTypesStandardF64sV2', 'VirtualMachineSizeTypesStandardF72sV2', 'VirtualMachineSizeTypesStandardG1', 'VirtualMachineSizeTypesStandardG2', 'VirtualMachineSizeTypesStandardG3', 'VirtualMachineSizeTypesStandardG4', 'VirtualMachineSizeTypesStandardG5', 'VirtualMachineSizeTypesStandardGS1', 'VirtualMachineSizeTypesStandardGS2', 'VirtualMachineSizeTypesStandardGS3', 'VirtualMachineSizeTypesStandardGS4', 'VirtualMachineSizeTypesStandardGS5', 'VirtualMachineSizeTypesStandardGS48', 'VirtualMachineSizeTypesStandardGS44', 'VirtualMachineSizeTypesStandardGS516', 'VirtualMachineSizeTypesStandardGS58', 'VirtualMachineSizeTypesStandardH8', 'VirtualMachineSizeTypesStandardH16', 'VirtualMachineSizeTypesStandardH8m', 'VirtualMachineSizeTypesStandardH16m', 'VirtualMachineSizeTypesStandardH16r', 'VirtualMachineSizeTypesStandardH16mr', 'VirtualMachineSizeTypesStandardL4s', 'VirtualMachineSizeTypesStandardL8s', 'VirtualMachineSizeTypesStandardL16s', 'VirtualMachineSizeTypesStandardL32s', 'VirtualMachineSizeTypesStandardM64s', 'VirtualMachineSizeTypesStandardM64ms', 'VirtualMachineSizeTypesStandardM128s', 'VirtualMachineSizeTypesStandardM128ms', 'VirtualMachineSizeTypesStandardM6432ms', 'VirtualMachineSizeTypesStandardM6416ms', 'VirtualMachineSizeTypesStandardM12864ms', 'VirtualMachineSizeTypesStandardM12832ms', 'VirtualMachineSizeTypesStandardNC6', 'VirtualMachineSizeTypesStandardNC12', 'VirtualMachineSizeTypesStandardNC24', 'VirtualMachineSizeTypesStandardNC24r', 'VirtualMachineSizeTypesStandardNC6sV2', 'VirtualMachineSizeTypesStandardNC12sV2', 'VirtualMachineSizeTypesStandardNC24sV2', 'VirtualMachineSizeTypesStandardNC24rsV2', 'VirtualMachineSizeTypesStandardNC6sV3', 'VirtualMachineSizeTypesStandardNC12sV3', 'VirtualMachineSizeTypesStandardNC24sV3', 'VirtualMachineSizeTypesStandardNC24rsV3', 'VirtualMachineSizeTypesStandardND6s', 'VirtualMachineSizeTypesStandardND12s', 'VirtualMachineSizeTypesStandardND24s', 'VirtualMachineSizeTypesStandardND24rs', 'VirtualMachineSizeTypesStandardNV6', 'VirtualMachineSizeTypesStandardNV12', 'VirtualMachineSizeTypesStandardNV24'
+ // VMSize - Specifies the size of the virtual machine.
The enum data type is currently deprecated and will be removed by December 23rd 2023.
Recommended way to get the list of available sizes is using these APIs:
[List all available virtual machine sizes in an availability set](https://docs.microsoft.com/rest/api/compute/availabilitysets/listavailablesizes)
[List all available virtual machine sizes in a region]( https://docs.microsoft.com/rest/api/compute/resourceskus/list)
[List all available virtual machine sizes for resizing](https://docs.microsoft.com/rest/api/compute/virtualmachines/listavailablesizes). For more information about virtual machine sizes, see [Sizes for virtual machines](https://docs.microsoft.com/azure/virtual-machines/sizes).
The available VM sizes depend on region and availability set. Possible values include: 'BasicA0', 'BasicA1', 'BasicA2', 'BasicA3', 'BasicA4', 'StandardA0', 'StandardA1', 'StandardA2', 'StandardA3', 'StandardA4', 'StandardA5', 'StandardA6', 'StandardA7', 'StandardA8', 'StandardA9', 'StandardA10', 'StandardA11', 'StandardA1V2', 'StandardA2V2', 'StandardA4V2', 'StandardA8V2', 'StandardA2mV2', 'StandardA4mV2', 'StandardA8mV2', 'StandardB1s', 'StandardB1ms', 'StandardB2s', 'StandardB2ms', 'StandardB4ms', 'StandardB8ms', 'StandardD1', 'StandardD2', 'StandardD3', 'StandardD4', 'StandardD11', 'StandardD12', 'StandardD13', 'StandardD14', 'StandardD1V2', 'StandardD2V2', 'StandardD3V2', 'StandardD4V2', 'StandardD5V2', 'StandardD2V3', 'StandardD4V3', 'StandardD8V3', 'StandardD16V3', 'StandardD32V3', 'StandardD64V3', 'StandardD2sV3', 'StandardD4sV3', 'StandardD8sV3', 'StandardD16sV3', 'StandardD32sV3', 'StandardD64sV3', 'StandardD11V2', 'StandardD12V2', 'StandardD13V2', 'StandardD14V2', 'StandardD15V2', 'StandardDS1', 'StandardDS2', 'StandardDS3', 'StandardDS4', 'StandardDS11', 'StandardDS12', 'StandardDS13', 'StandardDS14', 'StandardDS1V2', 'StandardDS2V2', 'StandardDS3V2', 'StandardDS4V2', 'StandardDS5V2', 'StandardDS11V2', 'StandardDS12V2', 'StandardDS13V2', 'StandardDS14V2', 'StandardDS15V2', 'StandardDS134V2', 'StandardDS132V2', 'StandardDS148V2', 'StandardDS144V2', 'StandardE2V3', 'StandardE4V3', 'StandardE8V3', 'StandardE16V3', 'StandardE32V3', 'StandardE64V3', 'StandardE2sV3', 'StandardE4sV3', 'StandardE8sV3', 'StandardE16sV3', 'StandardE32sV3', 'StandardE64sV3', 'StandardE3216V3', 'StandardE328sV3', 'StandardE6432sV3', 'StandardE6416sV3', 'StandardF1', 'StandardF2', 'StandardF4', 'StandardF8', 'StandardF16', 'StandardF1s', 'StandardF2s', 'StandardF4s', 'StandardF8s', 'StandardF16s', 'StandardF2sV2', 'StandardF4sV2', 'StandardF8sV2', 'StandardF16sV2', 'StandardF32sV2', 'StandardF64sV2', 'StandardF72sV2', 'StandardG1', 'StandardG2', 'StandardG3', 'StandardG4', 'StandardG5', 'StandardGS1', 'StandardGS2', 'StandardGS3', 'StandardGS4', 'StandardGS5', 'StandardGS48', 'StandardGS44', 'StandardGS516', 'StandardGS58', 'StandardH8', 'StandardH16', 'StandardH8m', 'StandardH16m', 'StandardH16r', 'StandardH16mr', 'StandardL4s', 'StandardL8s', 'StandardL16s', 'StandardL32s', 'StandardM64s', 'StandardM64ms', 'StandardM128s', 'StandardM128ms', 'StandardM6432ms', 'StandardM6416ms', 'StandardM12864ms', 'StandardM12832ms', 'StandardNC6', 'StandardNC12', 'StandardNC24', 'StandardNC24r', 'StandardNC6sV2', 'StandardNC12sV2', 'StandardNC24sV2', 'StandardNC24rsV2', 'StandardNC6sV3', 'StandardNC12sV3', 'StandardNC24sV3', 'StandardNC24rsV3', 'StandardND6s', 'StandardND12s', 'StandardND24s', 'StandardND24rs', 'StandardNV6', 'StandardNV12', 'StandardNV24'
VMSize VirtualMachineSizeTypes `json:"vmSize,omitempty"`
// VMSizeProperties - Specifies the properties for customizing the size of the virtual machine. Minimum api-version: 2021-07-01.
This feature is still in preview mode and is not supported for VirtualMachineScaleSet.
Please follow the instructions in [VM Customization](https://aka.ms/vmcustomization) for more details.
VMSizeProperties *VMSizeProperties `json:"vmSizeProperties,omitempty"`
@@ -9248,7 +9421,7 @@ type ImageDataDisk struct {
Caching CachingTypes `json:"caching,omitempty"`
// DiskSizeGB - Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.
This value cannot be larger than 1023 GB
DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
- // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS', 'StorageAccountTypesPremiumZRS', 'StorageAccountTypesStandardSSDZRS'
+ // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS', 'StorageAccountTypesPremiumZRS', 'StorageAccountTypesStandardSSDZRS', 'StorageAccountTypesPremiumV2LRS'
StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"`
// DiskEncryptionSet - Specifies the customer managed disk encryption set resource id for the managed image disk.
DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"`
@@ -9266,7 +9439,7 @@ type ImageDisk struct {
Caching CachingTypes `json:"caching,omitempty"`
// DiskSizeGB - Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.
This value cannot be larger than 1023 GB
DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
- // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS', 'StorageAccountTypesPremiumZRS', 'StorageAccountTypesStandardSSDZRS'
+ // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS', 'StorageAccountTypesPremiumZRS', 'StorageAccountTypesStandardSSDZRS', 'StorageAccountTypesPremiumV2LRS'
StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"`
// DiskEncryptionSet - Specifies the customer managed disk encryption set resource id for the managed image disk.
DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"`
@@ -9443,7 +9616,7 @@ func NewImageListResultPage(cur ImageListResult, getNextPage func(context.Contex
type ImageOSDisk struct {
// OsType - This property allows you to specify the type of the OS that is included in the disk if creating a VM from a custom image.
Possible values are:
**Windows**
**Linux**. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux'
OsType OperatingSystemTypes `json:"osType,omitempty"`
- // OsState - The OS State. Possible values include: 'OperatingSystemStateTypesGeneralized', 'OperatingSystemStateTypesSpecialized'
+ // OsState - The OS State. For managed images, use Generalized. Possible values include: 'Generalized', 'Specialized'
OsState OperatingSystemStateTypes `json:"osState,omitempty"`
// Snapshot - The snapshot.
Snapshot *SubResource `json:"snapshot,omitempty"`
@@ -9455,7 +9628,7 @@ type ImageOSDisk struct {
Caching CachingTypes `json:"caching,omitempty"`
// DiskSizeGB - Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.
This value cannot be larger than 1023 GB
DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
- // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS', 'StorageAccountTypesPremiumZRS', 'StorageAccountTypesStandardSSDZRS'
+ // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS', 'StorageAccountTypesPremiumZRS', 'StorageAccountTypesStandardSSDZRS', 'StorageAccountTypesPremiumV2LRS'
StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"`
// DiskEncryptionSet - Specifies the customer managed disk encryption set resource id for the managed image disk.
DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"`
@@ -9510,12 +9683,14 @@ type ImageReference struct {
Offer *string `json:"offer,omitempty"`
// Sku - The image SKU.
Sku *string `json:"sku,omitempty"`
- // Version - Specifies the version of the platform image or marketplace image used to create the virtual machine. The allowed formats are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. Specify 'latest' to use the latest version of an image available at deploy time. Even if you use 'latest', the VM image will not automatically update after deploy time even if a new version becomes available.
+ // Version - Specifies the version of the platform image or marketplace image used to create the virtual machine. The allowed formats are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. Specify 'latest' to use the latest version of an image available at deploy time. Even if you use 'latest', the VM image will not automatically update after deploy time even if a new version becomes available. Please do not use field 'version' for gallery image deployment, gallery image should always use 'id' field for deployment, to use 'latest' version of gallery image, just set '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageName}' in the 'id' field without version input.
Version *string `json:"version,omitempty"`
// ExactVersion - READ-ONLY; Specifies in decimal numbers, the version of platform image or marketplace image used to create the virtual machine. This readonly field differs from 'version', only if the value specified in 'version' field is 'latest'.
ExactVersion *string `json:"exactVersion,omitempty"`
// SharedGalleryImageID - Specified the shared gallery image unique id for vm deployment. This can be fetched from shared gallery image GET call.
SharedGalleryImageID *string `json:"sharedGalleryImageId,omitempty"`
+ // CommunityGalleryImageID - Specified the community gallery image unique id for vm deployment. This can be fetched from community gallery image GET call.
+ CommunityGalleryImageID *string `json:"communityGalleryImageId,omitempty"`
// ID - Resource Id
ID *string `json:"id,omitempty"`
}
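// ---- Editor's sketch (illustrative only; not part of the vendored diff) ----
// Deploying from a community gallery image with the new CommunityGalleryImageID
// field; per the field docs it is fetched from a community gallery image GET
// call, so the value below is only a placeholder.
var communityImageRef = compute.ImageReference{
	CommunityGalleryImageID: to.StringPtr("{communityGalleryImageId}"),
}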
@@ -9538,6 +9713,9 @@ func (ir ImageReference) MarshalJSON() ([]byte, error) {
if ir.SharedGalleryImageID != nil {
objectMap["sharedGalleryImageId"] = ir.SharedGalleryImageID
}
+ if ir.CommunityGalleryImageID != nil {
+ objectMap["communityGalleryImageId"] = ir.CommunityGalleryImageID
+ }
if ir.ID != nil {
objectMap["id"] = ir.ID
}
@@ -9753,7 +9931,7 @@ func (is InstanceSku) MarshalJSON() ([]byte, error) {
type InstanceViewStatus struct {
// Code - The status code.
Code *string `json:"code,omitempty"`
- // Level - The level code. Possible values include: 'StatusLevelTypesInfo', 'StatusLevelTypesWarning', 'StatusLevelTypesError'
+ // Level - The level code. Possible values include: 'Info', 'Warning', 'Error'
Level StatusLevelTypes `json:"level,omitempty"`
// DisplayStatus - The short localizable label for the status.
DisplayStatus *string `json:"displayStatus,omitempty"`
@@ -9879,8 +10057,17 @@ type LinuxParameters struct {
type LinuxPatchSettings struct {
// PatchMode - Specifies the mode of VM Guest Patching to IaaS virtual machine or virtual machines associated to virtual machine scale set with OrchestrationMode as Flexible.
Possible values are:
**ImageDefault** - The virtual machine's default patching configuration is used.
**AutomaticByPlatform** - The virtual machine will be automatically updated by the platform. The property provisionVMAgent must be true. Possible values include: 'LinuxVMGuestPatchModeImageDefault', 'LinuxVMGuestPatchModeAutomaticByPlatform'
PatchMode LinuxVMGuestPatchMode `json:"patchMode,omitempty"`
- // AssessmentMode - Specifies the mode of VM Guest Patch Assessment for the IaaS virtual machine.
Possible values are:
**ImageDefault** - You control the timing of patch assessments on a virtual machine.
**AutomaticByPlatform** - The platform will trigger periodic patch assessments. The property provisionVMAgent must be true. Possible values include: 'LinuxPatchAssessmentModeImageDefault', 'LinuxPatchAssessmentModeAutomaticByPlatform'
+ // AssessmentMode - Specifies the mode of VM Guest Patch Assessment for the IaaS virtual machine.
Possible values are:
**ImageDefault** - You control the timing of patch assessments on a virtual machine.
**AutomaticByPlatform** - The platform will trigger periodic patch assessments. The property provisionVMAgent must be true. Possible values include: 'ImageDefault', 'AutomaticByPlatform'
AssessmentMode LinuxPatchAssessmentMode `json:"assessmentMode,omitempty"`
+ // AutomaticByPlatformSettings - Specifies additional settings for patch mode AutomaticByPlatform in VM Guest Patching on Linux.
+ AutomaticByPlatformSettings *LinuxVMGuestPatchAutomaticByPlatformSettings `json:"automaticByPlatformSettings,omitempty"`
+}
+
+// LinuxVMGuestPatchAutomaticByPlatformSettings specifies additional settings to be applied when patch mode
+// AutomaticByPlatform is selected in Linux patch settings.
+type LinuxVMGuestPatchAutomaticByPlatformSettings struct {
+ // RebootSetting - Specifies the reboot setting for all AutomaticByPlatform patch installation operations. Possible values include: 'LinuxVMGuestPatchAutomaticByPlatformRebootSettingUnknown', 'LinuxVMGuestPatchAutomaticByPlatformRebootSettingIfRequired', 'LinuxVMGuestPatchAutomaticByPlatformRebootSettingNever', 'LinuxVMGuestPatchAutomaticByPlatformRebootSettingAlways'
+ RebootSetting LinuxVMGuestPatchAutomaticByPlatformRebootSetting `json:"rebootSetting,omitempty"`
}
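// ---- Editor's sketch (illustrative only; not part of the vendored diff) ----
// Opting a Linux VM into platform patching and using the new
// AutomaticByPlatformSettings to control reboot behavior; both constant names
// are taken verbatim from the doc comments above.
var linuxPatching = compute.LinuxPatchSettings{
	PatchMode: compute.LinuxVMGuestPatchModeAutomaticByPlatform,
	AutomaticByPlatformSettings: &compute.LinuxVMGuestPatchAutomaticByPlatformSettings{
		RebootSetting: compute.LinuxVMGuestPatchAutomaticByPlatformRebootSettingIfRequired,
	},
}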
// ListUsagesResult the List Usages operation response.
@@ -10245,10 +10432,12 @@ type ManagedArtifact struct {
// ManagedDiskParameters the parameters of a managed disk.
type ManagedDiskParameters struct {
- // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS', 'StorageAccountTypesPremiumZRS', 'StorageAccountTypesStandardSSDZRS'
+ // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS', 'StorageAccountTypesPremiumZRS', 'StorageAccountTypesStandardSSDZRS', 'StorageAccountTypesPremiumV2LRS'
StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"`
// DiskEncryptionSet - Specifies the customer managed disk encryption set resource id for the managed disk.
DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"`
+ // SecurityProfile - Specifies the security profile for the managed disk.
+ SecurityProfile *VMDiskSecurityProfile `json:"securityProfile,omitempty"`
// ID - Resource Id
ID *string `json:"id,omitempty"`
}
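// ---- Editor's sketch (illustrative only; not part of the vendored diff) ----
// Attaching the new per-disk security profile to managed disk parameters.
// VMDiskSecurityProfile is defined elsewhere in this file, so it is left empty
// here rather than guessing at its fields.
var osDiskParams = compute.ManagedDiskParameters{
	StorageAccountType: compute.StorageAccountTypesPremiumLRS,
	SecurityProfile:    &compute.VMDiskSecurityProfile{},
}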
@@ -10309,7 +10498,7 @@ func (nir *NetworkInterfaceReference) UnmarshalJSON(body []byte) error {
type NetworkInterfaceReferenceProperties struct {
// Primary - Specifies the primary network interface in case the virtual machine has more than 1 network interface.
Primary *bool `json:"primary,omitempty"`
- // DeleteOption - Specify what happens to the network interface when the VM is deleted. Possible values include: 'DeleteOptionsDelete', 'DeleteOptionsDetach'
+ // DeleteOption - Specify what happens to the network interface when the VM is deleted. Possible values include: 'Delete', 'Detach'
DeleteOption DeleteOptions `json:"deleteOption,omitempty"`
}
@@ -10317,7 +10506,7 @@ type NetworkInterfaceReferenceProperties struct {
type NetworkProfile struct {
// NetworkInterfaces - Specifies the list of resource Ids for the network interfaces associated with the virtual machine.
NetworkInterfaces *[]NetworkInterfaceReference `json:"networkInterfaces,omitempty"`
- // NetworkAPIVersion - specifies the Microsoft.Network API version used when creating networking resources in the Network Interface Configurations. Possible values include: 'NetworkAPIVersionTwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne'
+ // NetworkAPIVersion - specifies the Microsoft.Network API version used when creating networking resources in the Network Interface Configurations. Possible values include: 'TwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne'
NetworkAPIVersion NetworkAPIVersion `json:"networkApiVersion,omitempty"`
// NetworkInterfaceConfigurations - Specifies the networking configurations that will be used to create the virtual machine networking resources.
NetworkInterfaceConfigurations *[]VirtualMachineNetworkInterfaceConfiguration `json:"networkInterfaceConfigurations,omitempty"`
@@ -10416,17 +10605,17 @@ func (ovd OperationValueDisplay) MarshalJSON() ([]byte, error) {
// OrchestrationServiceStateInput the input for OrchestrationServiceState
type OrchestrationServiceStateInput struct {
- // ServiceName - The name of the service. Possible values include: 'OrchestrationServiceNamesAutomaticRepairs'
+ // ServiceName - The name of the service. Possible values include: 'AutomaticRepairs'
ServiceName OrchestrationServiceNames `json:"serviceName,omitempty"`
- // Action - The action to be performed. Possible values include: 'OrchestrationServiceStateActionResume', 'OrchestrationServiceStateActionSuspend'
+ // Action - The action to be performed. Possible values include: 'Resume', 'Suspend'
Action OrchestrationServiceStateAction `json:"action,omitempty"`
}
// OrchestrationServiceSummary summary for an orchestration service of a virtual machine scale set.
type OrchestrationServiceSummary struct {
- // ServiceName - READ-ONLY; The name of the service. Possible values include: 'OrchestrationServiceNamesAutomaticRepairs', 'OrchestrationServiceNamesDummyOrchestrationServiceName'
+ // ServiceName - READ-ONLY; The name of the service. Possible values include: 'AutomaticRepairs', 'DummyOrchestrationServiceName'
ServiceName OrchestrationServiceNames `json:"serviceName,omitempty"`
- // ServiceState - READ-ONLY; The current state of the service. Possible values include: 'OrchestrationServiceStateNotRunning', 'OrchestrationServiceStateRunning', 'OrchestrationServiceStateSuspended'
+ // ServiceState - READ-ONLY; The current state of the service. Possible values include: 'NotRunning', 'Running', 'Suspended'
ServiceState OrchestrationServiceState `json:"serviceState,omitempty"`
}
@@ -10474,10 +10663,20 @@ type OSDiskImage struct {
// OSDiskImageEncryption contains encryption settings for an OS disk image.
type OSDiskImageEncryption struct {
+ // SecurityProfile - This property specifies the security profile of an OS disk image.
+ SecurityProfile *OSDiskImageSecurityProfile `json:"securityProfile,omitempty"`
// DiskEncryptionSetID - A relative URI containing the resource ID of the disk encryption set.
DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty"`
}
+// OSDiskImageSecurityProfile contains security profile for an OS disk image.
+type OSDiskImageSecurityProfile struct {
+ // ConfidentialVMEncryptionType - confidential VM encryption types. Possible values include: 'EncryptedVMGuestStateOnlyWithPmk', 'EncryptedWithPmk', 'EncryptedWithCmk'
+ ConfidentialVMEncryptionType ConfidentialVMEncryptionType `json:"confidentialVMEncryptionType,omitempty"`
+ // SecureVMDiskEncryptionSetID - secure VM disk encryption set id
+ SecureVMDiskEncryptionSetID *string `json:"secureVMDiskEncryptionSetId,omitempty"`
+}
+
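// ---- Editor's sketch (illustrative only; not part of the vendored diff) ----
// Describing a Confidential VM OS disk image encrypted with a customer-managed
// key; the unprefixed ConfidentialVMEncryptionType constant follows the value
// list in the doc comment above, and the encryption set ID is a placeholder.
var cvmOSDiskImageEncryption = compute.OSDiskImageEncryption{
	SecurityProfile: &compute.OSDiskImageSecurityProfile{
		ConfidentialVMEncryptionType: compute.EncryptedWithCmk,
		SecureVMDiskEncryptionSetID:  to.StringPtr("{diskEncryptionSetId}"),
	},
}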
// OSFamily describes a cloud service OS family.
type OSFamily struct {
autorest.Response `json:"-"`
@@ -10693,7 +10892,7 @@ type OSProfile struct {
Secrets *[]VaultSecretGroup `json:"secrets,omitempty"`
// AllowExtensionOperations - Specifies whether extension operations should be allowed on the virtual machine.
This may only be set to False when no extensions are present on the virtual machine.
AllowExtensionOperations *bool `json:"allowExtensionOperations,omitempty"`
- // RequireGuestProvisionSignal - Specifies whether the guest provision signal is required to infer provision success of the virtual machine. **Note: This property is for private testing only, and all customers must not set the property to false.**
+ // RequireGuestProvisionSignal - Optional property which must either be set to True or omitted.
RequireGuestProvisionSignal *bool `json:"requireGuestProvisionSignal,omitempty"`
}
@@ -10948,6 +11147,8 @@ type PatchSettings struct {
EnableHotpatching *bool `json:"enableHotpatching,omitempty"`
// AssessmentMode - Specifies the mode of VM Guest patch assessment for the IaaS virtual machine.
Possible values are:
**ImageDefault** - You control the timing of patch assessments on a virtual machine.
**AutomaticByPlatform** - The platform will trigger periodic patch assessments. The property provisionVMAgent must be true. Possible values include: 'WindowsPatchAssessmentModeImageDefault', 'WindowsPatchAssessmentModeAutomaticByPlatform'
AssessmentMode WindowsPatchAssessmentMode `json:"assessmentMode,omitempty"`
+ // AutomaticByPlatformSettings - Specifies additional settings for patch mode AutomaticByPlatform in VM Guest Patching on Windows.
+ AutomaticByPlatformSettings *WindowsVMGuestPatchAutomaticByPlatformSettings `json:"automaticByPlatformSettings,omitempty"`
}
// PirCommunityGalleryResource base information about the community gallery resource in pir.
@@ -11479,7 +11680,7 @@ func (plrp PrivateLinkResourceProperties) MarshalJSON() ([]byte, error) {
// PrivateLinkServiceConnectionState a collection of information about the state of the connection between
// service consumer and provider.
type PrivateLinkServiceConnectionState struct {
- // Status - Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service. Possible values include: 'PrivateEndpointServiceConnectionStatusPending', 'PrivateEndpointServiceConnectionStatusApproved', 'PrivateEndpointServiceConnectionStatusRejected'
+ // Status - Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service. Possible values include: 'Pending', 'Approved', 'Rejected'
Status PrivateEndpointServiceConnectionStatus `json:"status,omitempty"`
// Description - The reason for approval/rejection of the connection.
Description *string `json:"description,omitempty"`
@@ -11498,6 +11699,8 @@ type ProximityPlacementGroup struct {
autorest.Response `json:"-"`
// ProximityPlacementGroupProperties - Describes the properties of a Proximity Placement Group.
*ProximityPlacementGroupProperties `json:"properties,omitempty"`
+ // Zones - Specifies the Availability Zone where virtual machine, virtual machine scale set or availability set associated with the proximity placement group can be created.
+ Zones *[]string `json:"zones,omitempty"`
// ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Resource name
@@ -11516,6 +11719,9 @@ func (ppg ProximityPlacementGroup) MarshalJSON() ([]byte, error) {
if ppg.ProximityPlacementGroupProperties != nil {
objectMap["properties"] = ppg.ProximityPlacementGroupProperties
}
+ if ppg.Zones != nil {
+ objectMap["zones"] = ppg.Zones
+ }
if ppg.Location != nil {
objectMap["location"] = ppg.Location
}
@@ -11543,6 +11749,15 @@ func (ppg *ProximityPlacementGroup) UnmarshalJSON(body []byte) error {
}
ppg.ProximityPlacementGroupProperties = &proximityPlacementGroupProperties
}
+ case "zones":
+ if v != nil {
+ var zones []string
+ err = json.Unmarshal(*v, &zones)
+ if err != nil {
+ return err
+ }
+ ppg.Zones = &zones
+ }
case "id":
if v != nil {
var ID string
@@ -11756,7 +11971,7 @@ func NewProximityPlacementGroupListResultPage(cur ProximityPlacementGroupListRes
// ProximityPlacementGroupProperties describes the properties of a Proximity Placement Group.
type ProximityPlacementGroupProperties struct {
- // ProximityPlacementGroupType - Specifies the type of the proximity placement group.
Possible values are:
**Standard** : Co-locate resources within an Azure region or Availability Zone.
**Ultra** : For future use. Possible values include: 'ProximityPlacementGroupTypeStandard', 'ProximityPlacementGroupTypeUltra'
+ // ProximityPlacementGroupType - Specifies the type of the proximity placement group.
Possible values are:
**Standard** : Co-locate resources within an Azure region or Availability Zone.
**Ultra** : For future use. Possible values include: 'Standard', 'Ultra'
ProximityPlacementGroupType ProximityPlacementGroupType `json:"proximityPlacementGroupType,omitempty"`
// VirtualMachines - READ-ONLY; A list of references to all virtual machines in the proximity placement group.
VirtualMachines *[]SubResourceWithColocationStatus `json:"virtualMachines,omitempty"`
@@ -11766,6 +11981,8 @@ type ProximityPlacementGroupProperties struct {
AvailabilitySets *[]SubResourceWithColocationStatus `json:"availabilitySets,omitempty"`
// ColocationStatus - Describes colocation status of the Proximity Placement Group.
ColocationStatus *InstanceViewStatus `json:"colocationStatus,omitempty"`
+ // Intent - Specifies the user intent of the proximity placement group.
+ Intent *ProximityPlacementGroupPropertiesIntent `json:"intent,omitempty"`
}
// MarshalJSON is the custom marshaler for ProximityPlacementGroupProperties.
@@ -11777,9 +11994,18 @@ func (ppgp ProximityPlacementGroupProperties) MarshalJSON() ([]byte, error) {
if ppgp.ColocationStatus != nil {
objectMap["colocationStatus"] = ppgp.ColocationStatus
}
+ if ppgp.Intent != nil {
+ objectMap["intent"] = ppgp.Intent
+ }
return json.Marshal(objectMap)
}
+// ProximityPlacementGroupPropertiesIntent specifies the user intent of the proximity placement group.
+type ProximityPlacementGroupPropertiesIntent struct {
+ // VMSizes - Specifies possible sizes of virtual machines that can be created in the proximity placement group.
+ VMSizes *[]string `json:"vmSizes,omitempty"`
+}
+
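// ---- Editor's sketch (illustrative only; not part of the vendored diff) ----
// Creating a zonal proximity placement group using the new Zones and Intent
// properties together; the zone and VM size values are placeholders.
var zonalPPG = compute.ProximityPlacementGroup{
	Location: to.StringPtr("eastus"),
	Zones:    &[]string{"1"},
	ProximityPlacementGroupProperties: &compute.ProximityPlacementGroupProperties{
		Intent: &compute.ProximityPlacementGroupPropertiesIntent{
			VMSizes: &[]string{"Standard_D2s_v3"},
		},
	},
}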
// ProximityPlacementGroupUpdate specifies information about the proximity placement group.
type ProximityPlacementGroupUpdate struct {
// Tags - Resource tags
@@ -11828,11 +12054,11 @@ func (pr ProxyResource) MarshalJSON() ([]byte, error) {
return json.Marshal(objectMap)
}
-// PublicIPAddressSku describes the public IP Sku
+// PublicIPAddressSku describes the public IP Sku. It can only be set with OrchestrationMode as Flexible.
type PublicIPAddressSku struct {
// Name - Specify public IP sku name. Possible values include: 'PublicIPAddressSkuNameBasic', 'PublicIPAddressSkuNameStandard'
Name PublicIPAddressSkuName `json:"name,omitempty"`
- // Tier - Specify public IP sku tier. Possible values include: 'PublicIPAddressSkuTierRegional', 'PublicIPAddressSkuTierGlobal'
+ // Tier - Specify public IP sku tier. Possible values include: 'Regional', 'Global'
Tier PublicIPAddressSkuTier `json:"tier,omitempty"`
}
@@ -11894,9 +12120,19 @@ func (rrs RegionalReplicationStatus) MarshalJSON() ([]byte, error) {
return json.Marshal(objectMap)
}
+// RegionalSharingStatus gallery regional sharing status
+type RegionalSharingStatus struct {
+ // Region - Region name
+ Region *string `json:"region,omitempty"`
+ // State - Gallery sharing state in current region. Possible values include: 'SharingStateSucceeded', 'SharingStateInProgress', 'SharingStateFailed', 'SharingStateUnknown'
+ State SharingState `json:"state,omitempty"`
+ // Details - Details of gallery regional sharing failure.
+ Details *string `json:"details,omitempty"`
+}
+
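// ---- Editor's sketch (illustrative only; not part of the vendored diff) ----
// Surfacing per-region sharing failures from the new RegionalSharingStatus
// type; assumes the package/import header from the first sketch.
func reportSharingFailures(regions []compute.RegionalSharingStatus) {
	for _, r := range regions {
		if r.State != compute.SharingStateFailed {
			continue
		}
		if r.Region != nil && r.Details != nil {
			fmt.Printf("sharing failed in %s: %s\n", *r.Region, *r.Details)
		}
	}
}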
// ReplicationStatus this is the replication status of the gallery image version.
type ReplicationStatus struct {
- // AggregatedState - READ-ONLY; This is the aggregated replication status based on all the regional replication status flags. Possible values include: 'AggregatedReplicationStateUnknown', 'AggregatedReplicationStateInProgress', 'AggregatedReplicationStateCompleted', 'AggregatedReplicationStateFailed'
+ // AggregatedState - READ-ONLY; This is the aggregated replication status based on all the regional replication status flags. Possible values include: 'Unknown', 'InProgress', 'Completed', 'Failed'
AggregatedState AggregatedReplicationState `json:"aggregatedState,omitempty"`
// Summary - READ-ONLY; This is a summary of replication status for each region.
Summary *[]RegionalReplicationStatus `json:"summary,omitempty"`
@@ -11910,7 +12146,7 @@ func (rs ReplicationStatus) MarshalJSON() ([]byte, error) {
// RequestRateByIntervalInput api request input for LogAnalytics getRequestRateByInterval Api.
type RequestRateByIntervalInput struct {
- // IntervalLength - Interval value in minutes used to create LogAnalytics call rate logs. Possible values include: 'IntervalInMinsThreeMins', 'IntervalInMinsFiveMins', 'IntervalInMinsThirtyMins', 'IntervalInMinsSixtyMins'
+ // IntervalLength - Interval value in minutes used to create LogAnalytics call rate logs. Possible values include: 'ThreeMins', 'FiveMins', 'ThirtyMins', 'SixtyMins'
IntervalLength IntervalInMins `json:"intervalLength,omitempty"`
// BlobContainerSasURI - SAS Uri of the logging blob container to which LogAnalytics Api writes output logs to.
BlobContainerSasURI *string `json:"blobContainerSasUri,omitempty"`
@@ -11966,7 +12202,7 @@ type ResourceInstanceViewStatus struct {
Message *string `json:"message,omitempty"`
// Time - READ-ONLY; The time of the status.
Time *date.Time `json:"time,omitempty"`
- // Level - The level code. Possible values include: 'StatusLevelTypesInfo', 'StatusLevelTypesWarning', 'StatusLevelTypesError'
+ // Level - The level code. Possible values include: 'Info', 'Warning', 'Error'
Level StatusLevelTypes `json:"level,omitempty"`
}
@@ -12081,7 +12317,7 @@ type ResourceSkuLocationInfo struct {
ZoneDetails *[]ResourceSkuZoneDetails `json:"zoneDetails,omitempty"`
// ExtendedLocations - READ-ONLY; The names of extended locations.
ExtendedLocations *[]string `json:"extendedLocations,omitempty"`
- // Type - READ-ONLY; The type of the extended location. Possible values include: 'ExtendedLocationTypeEdgeZone'
+ // Type - READ-ONLY; The type of the extended location. Possible values include: 'EdgeZone'
Type ExtendedLocationType `json:"type,omitempty"`
}
@@ -12107,13 +12343,13 @@ func (rsri ResourceSkuRestrictionInfo) MarshalJSON() ([]byte, error) {
// ResourceSkuRestrictions describes scaling information of a SKU.
type ResourceSkuRestrictions struct {
- // Type - READ-ONLY; The type of restrictions. Possible values include: 'ResourceSkuRestrictionsTypeLocation', 'ResourceSkuRestrictionsTypeZone'
+ // Type - READ-ONLY; The type of restrictions. Possible values include: 'Location', 'Zone'
Type ResourceSkuRestrictionsType `json:"type,omitempty"`
// Values - READ-ONLY; The value of restrictions. If the restriction type is set to location. This would be different locations where the SKU is restricted.
Values *[]string `json:"values,omitempty"`
// RestrictionInfo - READ-ONLY; The information about the restriction where the SKU cannot be used.
RestrictionInfo *ResourceSkuRestrictionInfo `json:"restrictionInfo,omitempty"`
- // ReasonCode - READ-ONLY; The reason for restriction. Possible values include: 'ResourceSkuRestrictionsReasonCodeQuotaID', 'ResourceSkuRestrictionsReasonCodeNotAvailableForSubscription'
+ // ReasonCode - READ-ONLY; The reason for restriction. Possible values include: 'QuotaID', 'NotAvailableForSubscription'
ReasonCode ResourceSkuRestrictionsReasonCode `json:"reasonCode,omitempty"`
}
@@ -12455,6 +12691,32 @@ func NewResourceURIListPage(cur ResourceURIList, getNextPage func(context.Contex
}
}
+// ResourceWithOptionalLocation the Resource model definition with location property as optional.
+type ResourceWithOptionalLocation struct {
+ // Location - Resource location
+ Location *string `json:"location,omitempty"`
+ // ID - READ-ONLY; Resource Id
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Resource name
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type
+ Type *string `json:"type,omitempty"`
+ // Tags - Resource tags
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for ResourceWithOptionalLocation.
+func (rwol ResourceWithOptionalLocation) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if rwol.Location != nil {
+ objectMap["location"] = rwol.Location
+ }
+ if rwol.Tags != nil {
+ objectMap["tags"] = rwol.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
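Reviewer note: the custom marshaler above follows the generated pattern for keeping READ-ONLY fields out of request bodies; only `location` and `tags` survive serialization. A minimal sketch of that behavior, using the import path this PR vendors (the resource ID is made up):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	r := compute.ResourceWithOptionalLocation{
		ID:       to.StringPtr("/subscriptions/s/resourceGroups/rg/providers/Microsoft.Compute/x/y"), // READ-ONLY: dropped on marshal
		Location: to.StringPtr("eastus"),
		Tags:     map[string]*string{"env": to.StringPtr("test")},
	}
	b, _ := json.Marshal(r)
	fmt.Println(string(b)) // {"location":"eastus","tags":{"env":"test"}}
}
```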
// RestorePoint restore Point details.
type RestorePoint struct {
autorest.Response `json:"-"`
@@ -12914,6 +13176,14 @@ func (rpcu *RestorePointCollectionUpdate) UnmarshalJSON(body []byte) error {
return nil
}
+// RestorePointInstanceView the instance view of a restore point.
+type RestorePointInstanceView struct {
+ // DiskRestorePoints - The disk restore points information.
+ DiskRestorePoints *[]DiskRestorePointInstanceView `json:"diskRestorePoints,omitempty"`
+ // Statuses - The resource status information.
+ Statuses *[]InstanceViewStatus `json:"statuses,omitempty"`
+}
+
// RestorePointProperties the restore point properties.
type RestorePointProperties struct {
// ExcludeDisks - List of disk resource ids that the customer wishes to exclude from the restore point. If no disks are specified, all disks will be included.
@@ -12922,10 +13192,14 @@ type RestorePointProperties struct {
SourceMetadata *RestorePointSourceMetadata `json:"sourceMetadata,omitempty"`
// ProvisioningState - READ-ONLY; Gets the provisioning state of the restore point.
ProvisioningState *string `json:"provisioningState,omitempty"`
- // ConsistencyMode - READ-ONLY; Gets the consistency mode for the restore point. Please refer to https://aka.ms/RestorePoints for more details. Possible values include: 'ConsistencyModeTypesCrashConsistent', 'ConsistencyModeTypesFileSystemConsistent', 'ConsistencyModeTypesApplicationConsistent'
+ // ConsistencyMode - ConsistencyMode of the RestorePoint. Can be specified in the input while creating a restore point. For now, only CrashConsistent is accepted as a valid input. Please refer to https://aka.ms/RestorePoints for more details. Possible values include: 'CrashConsistent', 'FileSystemConsistent', 'ApplicationConsistent'
ConsistencyMode ConsistencyModeTypes `json:"consistencyMode,omitempty"`
- // ProvisioningDetails - READ-ONLY; Gets the provisioning details set by the server during Create restore point operation.
- ProvisioningDetails *RestorePointProvisioningDetails `json:"provisioningDetails,omitempty"`
+ // TimeCreated - Gets the creation time of the restore point.
+ TimeCreated *date.Time `json:"timeCreated,omitempty"`
+ // SourceRestorePoint - Resource Id of the source restore point from which a copy needs to be created.
+ SourceRestorePoint *APIEntityReference `json:"sourceRestorePoint,omitempty"`
+ // InstanceView - READ-ONLY; The restore point instance view.
+ InstanceView *RestorePointInstanceView `json:"instanceView,omitempty"`
}
// MarshalJSON is the custom marshaler for RestorePointProperties.
@@ -12934,21 +13208,18 @@ func (rpp RestorePointProperties) MarshalJSON() ([]byte, error) {
if rpp.ExcludeDisks != nil {
objectMap["excludeDisks"] = rpp.ExcludeDisks
}
+ if rpp.ConsistencyMode != "" {
+ objectMap["consistencyMode"] = rpp.ConsistencyMode
+ }
+ if rpp.TimeCreated != nil {
+ objectMap["timeCreated"] = rpp.TimeCreated
+ }
+ if rpp.SourceRestorePoint != nil {
+ objectMap["sourceRestorePoint"] = rpp.SourceRestorePoint
+ }
return json.Marshal(objectMap)
}
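Reviewer note: the RestorePointProvisioningDetails struct removed just below is replaced by the flattened TimeCreated/InstanceView fields, and SourceRestorePoint enables a new copy flow. A sketch of copy-style input, assuming the ConsistencyModeTypesCrashConsistent constant named in the pre-rename comment (the resource ID is made up):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	props := compute.RestorePointProperties{
		// Per the doc comment, CrashConsistent is the only accepted input today.
		ConsistencyMode: compute.ConsistencyModeTypesCrashConsistent,
		SourceRestorePoint: &compute.APIEntityReference{
			ID: to.StringPtr("/subscriptions/s/resourceGroups/rg/providers/Microsoft.Compute/restorePointCollections/rpc/restorePoints/src"),
		},
	}
	b, _ := json.Marshal(props)
	fmt.Println(string(b)) // only the writable fields that were set are emitted
}
```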
-// RestorePointProvisioningDetails restore Point Provisioning details.
-type RestorePointProvisioningDetails struct {
- // CreationTime - Gets the creation time of the restore point.
- CreationTime *date.Time `json:"creationTime,omitempty"`
- // TotalUsedSizeInBytes - Gets the total size of the data in all the disks which are part of the restore point.
- TotalUsedSizeInBytes *int64 `json:"totalUsedSizeInBytes,omitempty"`
- // StatusCode - Gets the status of the Create restore point operation.
- StatusCode *int32 `json:"statusCode,omitempty"`
- // StatusMessage - Gets the status message of the Create restore point operation.
- StatusMessage *string `json:"statusMessage,omitempty"`
-}
-
// RestorePointsCreateFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type RestorePointsCreateFuture struct {
@@ -13069,7 +13340,7 @@ type RestorePointSourceVMDataDisk struct {
// RestorePointSourceVMOSDisk describes an Operating System disk.
type RestorePointSourceVMOSDisk struct {
- // OsType - Gets the Operating System type. Possible values include: 'OperatingSystemTypeWindows', 'OperatingSystemTypeLinux'
+ // OsType - Gets the Operating System type. Possible values include: 'Windows', 'Linux'
OsType OperatingSystemType `json:"osType,omitempty"`
// EncryptionSettings - Gets the disk encryption settings.
EncryptionSettings *DiskEncryptionSettings `json:"encryptionSettings,omitempty"`
@@ -13394,7 +13665,7 @@ type RollingUpgradeRunningStatus struct {
Code RollingUpgradeStatusCode `json:"code,omitempty"`
// StartTime - READ-ONLY; Start time of the upgrade.
StartTime *date.Time `json:"startTime,omitempty"`
- // LastAction - READ-ONLY; The last action performed on the rolling upgrade. Possible values include: 'RollingUpgradeActionTypeStart', 'RollingUpgradeActionTypeCancel'
+ // LastAction - READ-ONLY; The last action performed on the rolling upgrade. Possible values include: 'Start', 'Cancel'
LastAction RollingUpgradeActionType `json:"lastAction,omitempty"`
// LastActionTime - READ-ONLY; Last action time of the upgrade.
LastActionTime *date.Time `json:"lastActionTime,omitempty"`
@@ -13774,7 +14045,7 @@ type SecurityProfile struct {
UefiSettings *UefiSettings `json:"uefiSettings,omitempty"`
// EncryptionAtHost - This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself. Default: The Encryption at host will be disabled unless this property is set to true for the resource.
EncryptionAtHost *bool `json:"encryptionAtHost,omitempty"`
- // SecurityType - Specifies the SecurityType of the virtual machine. It is set as TrustedLaunch to enable UefiSettings. Default: UefiSettings will not be enabled unless this property is set as TrustedLaunch. Possible values include: 'SecurityTypesTrustedLaunch'
+ // SecurityType - Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. Default: UefiSettings will not be enabled unless this property is set. Possible values include: 'SecurityTypesTrustedLaunch', 'SecurityTypesConfidentialVM'
SecurityType SecurityTypes `json:"securityType,omitempty"`
}
@@ -14083,14 +14354,14 @@ func NewSharedGalleryImageListPage(cur SharedGalleryImageList, getNextPage func(
type SharedGalleryImageProperties struct {
// OsType - This property allows you to specify the type of the OS that is included in the disk when creating a VM from a managed image. Possible values are: **Windows** **Linux**. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux'
OsType OperatingSystemTypes `json:"osType,omitempty"`
- // OsState - This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'. Possible values include: 'OperatingSystemStateTypesGeneralized', 'OperatingSystemStateTypesSpecialized'
+ // OsState - This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'. Possible values include: 'Generalized', 'Specialized'
OsState OperatingSystemStateTypes `json:"osState,omitempty"`
// EndOfLifeDate - The end of life date of the gallery image definition. This property can be used for decommissioning purposes. This property is updatable.
EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"`
Identifier *GalleryImageIdentifier `json:"identifier,omitempty"`
Recommended *RecommendedMachineConfiguration `json:"recommended,omitempty"`
Disallowed *Disallowed `json:"disallowed,omitempty"`
- // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'HyperVGenerationV1', 'HyperVGenerationV2'
+ // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'V1', 'V2'
HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"`
// Features - A list of gallery image features.
Features *[]GalleryImageFeature `json:"features,omitempty"`
@@ -14513,10 +14784,12 @@ func (sie ShareInfoElement) MarshalJSON() ([]byte, error) {
// SharingProfile profile for gallery sharing to subscription or tenant
type SharingProfile struct {
- // Permissions - This property allows you to specify the permission of sharing gallery. Possible values are: **Private** **Groups**. Possible values include: 'GallerySharingPermissionTypesPrivate', 'GallerySharingPermissionTypesGroups'
+ // Permissions - This property allows you to specify the permission of sharing gallery. Possible values are: **Private** **Groups**. Possible values include: 'Private', 'Groups'
Permissions GallerySharingPermissionTypes `json:"permissions,omitempty"`
// Groups - READ-ONLY; A list of sharing profile groups.
Groups *[]SharingProfileGroup `json:"groups,omitempty"`
+ // CommunityGalleryInfo - Information of community gallery if current gallery is shared to community.
+ CommunityGalleryInfo *CommunityGalleryInfo `json:"communityGalleryInfo,omitempty"`
}
// MarshalJSON is the custom marshaler for SharingProfile.
@@ -14525,21 +14798,32 @@ func (sp SharingProfile) MarshalJSON() ([]byte, error) {
if sp.Permissions != "" {
objectMap["permissions"] = sp.Permissions
}
+ if sp.CommunityGalleryInfo != nil {
+ objectMap["communityGalleryInfo"] = sp.CommunityGalleryInfo
+ }
return json.Marshal(objectMap)
}
// SharingProfileGroup group of the gallery sharing profile
type SharingProfileGroup struct {
- // Type - This property allows you to specify the type of sharing group. Possible values are: **Subscriptions** **AADTenants**. Possible values include: 'SharingProfileGroupTypesSubscriptions', 'SharingProfileGroupTypesAADTenants'
+ // Type - This property allows you to specify the type of sharing group. Possible values are: **Subscriptions** **AADTenants** **Community**. Possible values include: 'Subscriptions', 'AADTenants', 'Community'
Type SharingProfileGroupTypes `json:"type,omitempty"`
// Ids - A list of subscription/tenant ids the gallery is aimed to be shared to.
Ids *[]string `json:"ids,omitempty"`
}
+// SharingStatus sharing status of current gallery.
+type SharingStatus struct {
+ // AggregatedState - Aggregated sharing state of current gallery. Possible values include: 'SharingStateSucceeded', 'SharingStateInProgress', 'SharingStateFailed', 'SharingStateUnknown'
+ AggregatedState SharingState `json:"aggregatedState,omitempty"`
+ // Summary - Summary of all regional sharing status.
+ Summary *[]RegionalSharingStatus `json:"summary,omitempty"`
+}
+
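Reviewer note: the new RegionalSharingStatus/SharingStatus pair mirrors the existing ReplicationStatus pattern, an aggregate state plus a per-region summary. An illustrative walk over a hand-built status (the region names are made up; the SharingState constants are spelled out in the doc comments above):

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// printSharingStatus is an illustrative helper that reports each region's
// state and surfaces failure details when present.
func printSharingStatus(ss compute.SharingStatus) {
	fmt.Println("aggregated:", ss.AggregatedState)
	if ss.Summary == nil {
		return
	}
	for _, r := range *ss.Summary {
		fmt.Printf("  %s: %s", to.String(r.Region), r.State)
		if r.State == compute.SharingStateFailed && r.Details != nil {
			fmt.Printf(" (%s)", *r.Details)
		}
		fmt.Println()
	}
}

func main() {
	printSharingStatus(compute.SharingStatus{
		AggregatedState: compute.SharingStateInProgress,
		Summary: &[]compute.RegionalSharingStatus{
			{Region: to.StringPtr("eastus"), State: compute.SharingStateSucceeded},
			{Region: to.StringPtr("westeurope"), State: compute.SharingStateInProgress},
		},
	})
}
```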
// SharingUpdate specifies information about the gallery sharing profile update.
type SharingUpdate struct {
autorest.Response `json:"-"`
- // OperationType - This property allows you to specify the operation type of gallery sharing update. Possible values are: **Add** **Remove** **Reset**. Possible values include: 'SharingUpdateOperationTypesAdd', 'SharingUpdateOperationTypesRemove', 'SharingUpdateOperationTypesReset'
+ // OperationType - This property allows you to specify the operation type of gallery sharing update. Possible values are: **Add** **Remove** **Reset**. Possible values include: 'Add', 'Remove', 'Reset', 'EnableCommunity'
OperationType SharingUpdateOperationTypes `json:"operationType,omitempty"`
// Groups - A list of sharing profile groups.
Groups *[]SharingProfileGroup `json:"groups,omitempty"`
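Reviewer note: 'EnableCommunity' joins the sharing-update operation types in this version. A sketch of a community-sharing update; the generated constant names for these values aren't visible in this hunk, so raw strings are converted, and the gallery name is illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
)

func main() {
	update := compute.SharingUpdate{
		OperationType: compute.SharingUpdateOperationTypes("EnableCommunity"), // constant name assumed
		Groups: &[]compute.SharingProfileGroup{{
			Type: compute.SharingProfileGroupTypes("Community"), // likewise converted from the doc-comment value
			Ids:  &[]string{"myCommunityGalleryPublicName"},     // illustrative
		}},
	}
	b, _ := json.Marshal(update)
	fmt.Println(string(b))
}
```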
@@ -14860,11 +15144,11 @@ type SnapshotProperties struct {
TimeCreated *date.Time `json:"timeCreated,omitempty"`
// OsType - The Operating System type. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux'
OsType OperatingSystemTypes `json:"osType,omitempty"`
- // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'HyperVGenerationV1', 'HyperVGenerationV2'
+ // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'V1', 'V2'
HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"`
// PurchasePlan - Purchase plan information for the image from which the source disk for the snapshot was originally created.
PurchasePlan *PurchasePlan `json:"purchasePlan,omitempty"`
- // SupportedCapabilities - List of supported capabilities (like Accelerated Networking) for the image from which the source disk from the snapshot was originally created.
+ // SupportedCapabilities - List of supported capabilities for the image from which the source disk from the snapshot was originally created.
SupportedCapabilities *SupportedCapabilities `json:"supportedCapabilities,omitempty"`
// CreationData - Disk source information. CreationData information cannot be changed after the disk has been created.
CreationData *CreationData `json:"creationData,omitempty"`
@@ -14872,7 +15156,7 @@ type SnapshotProperties struct {
DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
// DiskSizeBytes - READ-ONLY; The size of the disk in bytes. This field is read only.
DiskSizeBytes *int64 `json:"diskSizeBytes,omitempty"`
- // DiskState - The state of the snapshot. Possible values include: 'DiskStateUnattached', 'DiskStateAttached', 'DiskStateReserved', 'DiskStateFrozen', 'DiskStateActiveSAS', 'DiskStateActiveSASFrozen', 'DiskStateReadyToUpload', 'DiskStateActiveUpload'
+ // DiskState - The state of the snapshot. Possible values include: 'Unattached', 'Attached', 'Reserved', 'Frozen', 'ActiveSAS', 'ActiveSASFrozen', 'ReadyToUpload', 'ActiveUpload'
DiskState DiskState `json:"diskState,omitempty"`
// UniqueID - READ-ONLY; Unique Guid identifying the resource.
UniqueID *string `json:"uniqueId,omitempty"`
@@ -14884,16 +15168,20 @@ type SnapshotProperties struct {
Incremental *bool `json:"incremental,omitempty"`
// Encryption - Encryption property can be used to encrypt data at rest with customer managed keys or platform managed keys.
Encryption *Encryption `json:"encryption,omitempty"`
- // NetworkAccessPolicy - Possible values include: 'NetworkAccessPolicyAllowAll', 'NetworkAccessPolicyAllowPrivate', 'NetworkAccessPolicyDenyAll'
+ // NetworkAccessPolicy - Possible values include: 'AllowAll', 'AllowPrivate', 'DenyAll'
NetworkAccessPolicy NetworkAccessPolicy `json:"networkAccessPolicy,omitempty"`
// DiskAccessID - ARM id of the DiskAccess resource for using private endpoints on disks.
DiskAccessID *string `json:"diskAccessId,omitempty"`
+ // SecurityProfile - Contains the security related information for the resource.
+ SecurityProfile *DiskSecurityProfile `json:"securityProfile,omitempty"`
// SupportsHibernation - Indicates the OS on a snapshot supports hibernation.
SupportsHibernation *bool `json:"supportsHibernation,omitempty"`
- // PublicNetworkAccess - Possible values include: 'PublicNetworkAccessEnabled', 'PublicNetworkAccessDisabled'
+ // PublicNetworkAccess - Possible values include: 'Enabled', 'Disabled'
PublicNetworkAccess PublicNetworkAccess `json:"publicNetworkAccess,omitempty"`
// CompletionPercent - Percentage complete for the background copy when a resource is created via the CopyStart operation.
CompletionPercent *float64 `json:"completionPercent,omitempty"`
+ // DataAccessAuthMode - Possible values include: 'DataAccessAuthModeAzureActiveDirectory', 'DataAccessAuthModeNone'
+ DataAccessAuthMode DataAccessAuthMode `json:"dataAccessAuthMode,omitempty"`
}
// MarshalJSON is the custom marshaler for SnapshotProperties.
@@ -14935,6 +15223,9 @@ func (sp SnapshotProperties) MarshalJSON() ([]byte, error) {
if sp.DiskAccessID != nil {
objectMap["diskAccessId"] = sp.DiskAccessID
}
+ if sp.SecurityProfile != nil {
+ objectMap["securityProfile"] = sp.SecurityProfile
+ }
if sp.SupportsHibernation != nil {
objectMap["supportsHibernation"] = sp.SupportsHibernation
}
@@ -14944,6 +15235,9 @@ func (sp SnapshotProperties) MarshalJSON() ([]byte, error) {
if sp.CompletionPercent != nil {
objectMap["completionPercent"] = sp.CompletionPercent
}
+ if sp.DataAccessAuthMode != "" {
+ objectMap["dataAccessAuthMode"] = sp.DataAccessAuthMode
+ }
return json.Marshal(objectMap)
}
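Reviewer note: SecurityProfile and DataAccessAuthMode are the two new snapshot fields, and the marshaler changes above emit them only when set. A partial-properties sketch exercising the new auth-mode field (a real create call also needs CreationData and friends):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	props := compute.SnapshotProperties{
		// Constant named verbatim in the doc comment above.
		DataAccessAuthMode:  compute.DataAccessAuthModeAzureActiveDirectory,
		SupportsHibernation: to.BoolPtr(true),
	}
	b, _ := json.Marshal(props)
	fmt.Println(string(b)) // only the fields that were set are emitted
}
```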
@@ -15244,14 +15538,18 @@ type SnapshotUpdateProperties struct {
EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"`
// Encryption - Encryption property can be used to encrypt data at rest with customer managed keys or platform managed keys.
Encryption *Encryption `json:"encryption,omitempty"`
- // NetworkAccessPolicy - Possible values include: 'NetworkAccessPolicyAllowAll', 'NetworkAccessPolicyAllowPrivate', 'NetworkAccessPolicyDenyAll'
+ // NetworkAccessPolicy - Possible values include: 'AllowAll', 'AllowPrivate', 'DenyAll'
NetworkAccessPolicy NetworkAccessPolicy `json:"networkAccessPolicy,omitempty"`
// DiskAccessID - ARM id of the DiskAccess resource for using private endpoints on disks.
DiskAccessID *string `json:"diskAccessId,omitempty"`
// SupportsHibernation - Indicates the OS on a snapshot supports hibernation.
SupportsHibernation *bool `json:"supportsHibernation,omitempty"`
- // PublicNetworkAccess - Possible values include: 'PublicNetworkAccessEnabled', 'PublicNetworkAccessDisabled'
+ // PublicNetworkAccess - Possible values include: 'Enabled', 'Disabled'
PublicNetworkAccess PublicNetworkAccess `json:"publicNetworkAccess,omitempty"`
+ // DataAccessAuthMode - Possible values include: 'DataAccessAuthModeAzureActiveDirectory', 'DataAccessAuthModeNone'
+ DataAccessAuthMode DataAccessAuthMode `json:"dataAccessAuthMode,omitempty"`
+ // SupportedCapabilities - List of supported capabilities for the image from which the OS disk was created.
+ SupportedCapabilities *SupportedCapabilities `json:"supportedCapabilities,omitempty"`
}
// SoftDeletePolicy contains information about the soft deletion policy of the gallery.
@@ -15673,11 +15971,12 @@ type SubResourceWithColocationStatus struct {
ID *string `json:"id,omitempty"`
}
-// SupportedCapabilities list of supported capabilities (like accelerated networking) persisted on the disk
-// resource for VM use.
+// SupportedCapabilities list of supported capabilities persisted on the disk resource for VM use.
type SupportedCapabilities struct {
// AcceleratedNetwork - True if the image from which the OS disk is created supports accelerated networking.
AcceleratedNetwork *bool `json:"acceleratedNetwork,omitempty"`
+ // Architecture - CPU architecture supported by an OS disk. Possible values include: 'X64', 'Arm64'
+ Architecture Architecture `json:"architecture,omitempty"`
}
// TargetRegion describes the target region information.
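Reviewer note: the new Architecture field rides along with AcceleratedNetwork on SupportedCapabilities. A sketch for an Arm64-capable image; the generated constant for 'Arm64' isn't shown in this hunk, so the raw string is converted:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	caps := compute.SupportedCapabilities{
		AcceleratedNetwork: to.BoolPtr(true),
		Architecture:       compute.Architecture("Arm64"), // constant name assumed
	}
	b, _ := json.Marshal(caps)
	fmt.Println(string(b)) // {"acceleratedNetwork":true,"architecture":"Arm64"}
}
```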
@@ -16039,6 +16338,20 @@ type UserArtifactSource struct {
DefaultConfigurationLink *string `json:"defaultConfigurationLink,omitempty"`
}
+// UserAssignedIdentitiesValue ...
+type UserAssignedIdentitiesValue struct {
+ // PrincipalID - READ-ONLY; The principal id of user assigned identity.
+ PrincipalID *string `json:"principalId,omitempty"`
+ // ClientID - READ-ONLY; The client id of user assigned identity.
+ ClientID *string `json:"clientId,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for UserAssignedIdentitiesValue.
+func (uaiv UserAssignedIdentitiesValue) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
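Reviewer note: this shared UserAssignedIdentitiesValue replaces the two per-resource duplicates deleted further down (VirtualMachineIdentityUserAssignedIdentitiesValue and VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue); both identity structs now take the same map. Usage is unchanged; a sketch with a made-up identity ID:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
)

func main() {
	// The dictionary key is the identity's ARM resource ID; the value's fields
	// are READ-ONLY and filled in by the service, so an empty value is sent.
	const identityID = "/subscriptions/s/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/example"
	vmIdentity := compute.VirtualMachineIdentity{
		Type: compute.ResourceIdentityTypeUserAssigned,
		UserAssignedIdentities: map[string]*compute.UserAssignedIdentitiesValue{
			identityID: {},
		},
	}
	// The same value type now also fits VirtualMachineScaleSetIdentity.
	fmt.Println(len(vmIdentity.UserAssignedIdentities))
}
```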
// VaultCertificate describes a single certificate reference in a Key Vault, and where the certificate
// should reside on the VM.
type VaultCertificate struct {
@@ -16304,14 +16617,14 @@ func (vmcr VirtualMachineCaptureResult) MarshalJSON() ([]byte, error) {
type VirtualMachineExtension struct {
autorest.Response `json:"-"`
*VirtualMachineExtensionProperties `json:"properties,omitempty"`
+ // Location - Resource location
+ Location *string `json:"location,omitempty"`
// ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
// Tags - Resource tags
Tags map[string]*string `json:"tags"`
}
@@ -16349,6 +16662,15 @@ func (vme *VirtualMachineExtension) UnmarshalJSON(body []byte) error {
}
vme.VirtualMachineExtensionProperties = &virtualMachineExtensionProperties
}
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ vme.Location = &location
+ }
case "id":
if v != nil {
var ID string
@@ -16376,15 +16698,6 @@ func (vme *VirtualMachineExtension) UnmarshalJSON(body []byte) error {
}
vme.Type = &typeVar
}
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- vme.Location = &location
- }
case "tags":
if v != nil {
var tags map[string]*string
@@ -16562,6 +16875,8 @@ type VirtualMachineExtensionProperties struct {
InstanceView *VirtualMachineExtensionInstanceView `json:"instanceView,omitempty"`
// SuppressFailures - Indicates whether failures stemming from the extension will be suppressed (Operational failures such as not connecting to the VM will not be suppressed regardless of this value). The default is false.
SuppressFailures *bool `json:"suppressFailures,omitempty"`
+ // ProtectedSettingsFromKeyVault - The extensions protected settings that are passed by reference, and consumed from key vault
+ ProtectedSettingsFromKeyVault interface{} `json:"protectedSettingsFromKeyVault,omitempty"`
}
// MarshalJSON is the custom marshaler for VirtualMachineExtensionProperties.
@@ -16597,6 +16912,9 @@ func (vmep VirtualMachineExtensionProperties) MarshalJSON() ([]byte, error) {
if vmep.SuppressFailures != nil {
objectMap["suppressFailures"] = vmep.SuppressFailures
}
+ if vmep.ProtectedSettingsFromKeyVault != nil {
+ objectMap["protectedSettingsFromKeyVault"] = vmep.ProtectedSettingsFromKeyVault
+ }
return json.Marshal(objectMap)
}
@@ -16802,6 +17120,8 @@ type VirtualMachineExtensionUpdateProperties struct {
ProtectedSettings interface{} `json:"protectedSettings,omitempty"`
// SuppressFailures - Indicates whether failures stemming from the extension will be suppressed (Operational failures such as not connecting to the VM will not be suppressed regardless of this value). The default is false.
SuppressFailures *bool `json:"suppressFailures,omitempty"`
+ // ProtectedSettingsFromKeyVault - The extensions protected settings that are passed by reference, and consumed from key vault
+ ProtectedSettingsFromKeyVault interface{} `json:"protectedSettingsFromKeyVault,omitempty"`
}
// VirtualMachineHealthStatus the health status of the VM.
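Reviewer note: ProtectedSettingsFromKeyVault lands on the extension, extension-update, and scale-set extension property types in this diff. The generated field is an untyped interface{}, so the vault-reference keys in this sketch are an assumption about the wire shape, not something this model enforces; the vault ID and secret URL are made up:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	ext := compute.VirtualMachineExtensionUpdateProperties{
		Publisher: to.StringPtr("Microsoft.Azure.Extensions"),
		Type:      to.StringPtr("CustomScript"),
		// Any JSON-serializable value passes through; key names here are assumed.
		ProtectedSettingsFromKeyVault: map[string]interface{}{
			"sourceVault": map[string]string{"id": "/subscriptions/s/resourceGroups/rg/providers/Microsoft.KeyVault/vaults/kv"},
			"secretUrl":   "https://kv.vault.azure.net/secrets/ext-settings",
		},
	}
	b, _ := json.Marshal(ext)
	fmt.Println(string(b))
}
```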
@@ -16825,7 +17145,7 @@ type VirtualMachineIdentity struct {
// Type - The type of identity used for the virtual machine. The type 'SystemAssigned, UserAssigned' includes both an implicitly created identity and a set of user assigned identities. The type 'None' will remove any identities from the virtual machine. Possible values include: 'ResourceIdentityTypeSystemAssigned', 'ResourceIdentityTypeUserAssigned', 'ResourceIdentityTypeSystemAssignedUserAssigned', 'ResourceIdentityTypeNone'
Type ResourceIdentityType `json:"type,omitempty"`
// UserAssignedIdentities - The list of user identities associated with the Virtual Machine. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
- UserAssignedIdentities map[string]*VirtualMachineIdentityUserAssignedIdentitiesValue `json:"userAssignedIdentities"`
+ UserAssignedIdentities map[string]*UserAssignedIdentitiesValue `json:"userAssignedIdentities"`
}
// MarshalJSON is the custom marshaler for VirtualMachineIdentity.
@@ -16840,20 +17160,6 @@ func (vmi VirtualMachineIdentity) MarshalJSON() ([]byte, error) {
return json.Marshal(objectMap)
}
-// VirtualMachineIdentityUserAssignedIdentitiesValue ...
-type VirtualMachineIdentityUserAssignedIdentitiesValue struct {
- // PrincipalID - READ-ONLY; The principal id of user assigned identity.
- PrincipalID *string `json:"principalId,omitempty"`
- // ClientID - READ-ONLY; The client id of user assigned identity.
- ClientID *string `json:"clientId,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineIdentityUserAssignedIdentitiesValue.
-func (vmiAiv VirtualMachineIdentityUserAssignedIdentitiesValue) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
// VirtualMachineImage describes a Virtual Machine Image.
type VirtualMachineImage struct {
autorest.Response `json:"-"`
@@ -16982,6 +17288,8 @@ type VirtualMachineImageProperties struct {
// Disallowed - Specifies disallowed configuration for the VirtualMachine created from the image
Disallowed *DisallowedConfiguration `json:"disallowed,omitempty"`
Features *[]VirtualMachineImageFeature `json:"features,omitempty"`
+ // Architecture - Possible values include: 'ArchitectureTypesX64', 'ArchitectureTypesArm64'
+ Architecture ArchitectureTypes `json:"architecture,omitempty"`
}
// VirtualMachineImageResource virtual machine image resource information.
@@ -17023,7 +17331,7 @@ func (vmir VirtualMachineImageResource) MarshalJSON() ([]byte, error) {
type VirtualMachineInstallPatchesParameters struct {
// MaximumDuration - Specifies the maximum amount of time that the operation will run. It must be an ISO 8601-compliant duration string such as PT4H (4 hours)
MaximumDuration *string `json:"maximumDuration,omitempty"`
- // RebootSetting - Defines when it is acceptable to reboot a VM during a software update operation. Possible values include: 'VMGuestPatchRebootSettingIfRequired', 'VMGuestPatchRebootSettingNever', 'VMGuestPatchRebootSettingAlways'
+ // RebootSetting - Defines when it is acceptable to reboot a VM during a software update operation. Possible values include: 'IfRequired', 'Never', 'Always'
RebootSetting VMGuestPatchRebootSetting `json:"rebootSetting,omitempty"`
// WindowsParameters - Input for InstallPatches on a Windows VM, as directly received by the API
WindowsParameters *WindowsParameters `json:"windowsParameters,omitempty"`
@@ -17376,7 +17684,7 @@ func (vmnic *VirtualMachineNetworkInterfaceConfiguration) UnmarshalJSON(body []b
type VirtualMachineNetworkInterfaceConfigurationProperties struct {
// Primary - Specifies the primary network interface in case the virtual machine has more than 1 network interface.
Primary *bool `json:"primary,omitempty"`
- // DeleteOption - Specify what happens to the network interface when the VM is deleted. Possible values include: 'DeleteOptionsDelete', 'DeleteOptionsDetach'
+ // DeleteOption - Specify what happens to the network interface when the VM is deleted. Possible values include: 'Delete', 'Detach'
DeleteOption DeleteOptions `json:"deleteOption,omitempty"`
// EnableAcceleratedNetworking - Specifies whether the network interface is accelerated networking-enabled.
EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking,omitempty"`
@@ -17516,7 +17824,7 @@ type VirtualMachineProperties struct {
VirtualMachineScaleSet *SubResource `json:"virtualMachineScaleSet,omitempty"`
// ProximityPlacementGroup - Specifies information about the proximity placement group that the virtual machine should be assigned to.
Minimum api-version: 2018-04-01.
ProximityPlacementGroup *SubResource `json:"proximityPlacementGroup,omitempty"`
- // Priority - Specifies the priority for the virtual machine. Minimum api-version: 2019-03-01. Possible values include: 'VirtualMachinePriorityTypesRegular', 'VirtualMachinePriorityTypesLow', 'VirtualMachinePriorityTypesSpot'
+ // Priority - Specifies the priority for the virtual machine. Minimum api-version: 2019-03-01. Possible values include: 'Regular', 'Low', 'Spot'
Priority VirtualMachinePriorityTypes `json:"priority,omitempty"`
// EvictionPolicy - Specifies the eviction policy for the Azure Spot virtual machine and Azure Spot scale set. For Azure Spot virtual machines, both 'Deallocate' and 'Delete' are supported and the minimum api-version is 2019-03-01. For Azure Spot scale sets, both 'Deallocate' and 'Delete' are supported and the minimum api-version is 2017-10-30-preview. Possible values include: 'VirtualMachineEvictionPolicyTypesDeallocate', 'VirtualMachineEvictionPolicyTypesDelete'
EvictionPolicy VirtualMachineEvictionPolicyTypes `json:"evictionPolicy,omitempty"`
@@ -17546,6 +17854,8 @@ type VirtualMachineProperties struct {
CapacityReservation *CapacityReservationProfile `json:"capacityReservation,omitempty"`
// ApplicationProfile - Specifies the gallery applications that should be made available to the VM/VMSS
ApplicationProfile *ApplicationProfile `json:"applicationProfile,omitempty"`
+ // TimeCreated - READ-ONLY; Specifies the time at which the Virtual Machine resource was created. Minimum api-version: 2022-03-01.
+ TimeCreated *date.Time `json:"timeCreated,omitempty"`
}
// MarshalJSON is the custom marshaler for VirtualMachineProperties.
@@ -17691,7 +18001,7 @@ func (vmpiac *VirtualMachinePublicIPAddressConfiguration) UnmarshalJSON(body []b
type VirtualMachinePublicIPAddressConfigurationProperties struct {
// IdleTimeoutInMinutes - The idle timeout of the public IP address.
IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"`
- // DeleteOption - Specify what happens to the public IP address when the VM is deleted. Possible values include: 'DeleteOptionsDelete', 'DeleteOptionsDetach'
+ // DeleteOption - Specify what happens to the public IP address when the VM is deleted. Possible values include: 'Delete', 'Detach'
DeleteOption DeleteOptions `json:"deleteOption,omitempty"`
// DNSSettings - The dns settings to be applied on the publicIP addresses .
DNSSettings *VirtualMachinePublicIPAddressDNSSettingsConfiguration `json:"dnsSettings,omitempty"`
@@ -17701,7 +18011,7 @@ type VirtualMachinePublicIPAddressConfigurationProperties struct {
PublicIPPrefix *SubResource `json:"publicIPPrefix,omitempty"`
// PublicIPAddressVersion - Available from Api-Version 2019-07-01 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. Possible values include: 'IPVersionsIPv4', 'IPVersionsIPv6'
PublicIPAddressVersion IPVersions `json:"publicIPAddressVersion,omitempty"`
- // PublicIPAllocationMethod - Specify the public IP allocation type. Possible values include: 'PublicIPAllocationMethodDynamic', 'PublicIPAllocationMethodStatic'
+ // PublicIPAllocationMethod - Specify the public IP allocation type. Possible values include: 'Dynamic', 'Static'
PublicIPAllocationMethod PublicIPAllocationMethod `json:"publicIPAllocationMethod,omitempty"`
}
@@ -18476,6 +18786,8 @@ type VirtualMachineScaleSetDataDisk struct {
DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"`
// DiskMBpsReadWrite - Specifies the bandwidth in MB per second for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.
DiskMBpsReadWrite *int64 `json:"diskMBpsReadWrite,omitempty"`
+ // DeleteOption - Specifies whether data disk should be deleted or detached upon VMSS Flex deletion (This feature is available for VMSS with Flexible OrchestrationMode only). Possible values: **Delete** If this value is used, the data disk is deleted when the VMSS Flex VM is deleted. **Detach** If this value is used, the data disk is retained after VMSS Flex VM is deleted. The default value is set to **Delete**. Possible values include: 'DiskDeleteOptionTypesDelete', 'DiskDeleteOptionTypesDetach'
+ DeleteOption DiskDeleteOptionTypes `json:"deleteOption,omitempty"`
}
// VirtualMachineScaleSetExtension describes a Virtual Machine Scale Set Extension.
@@ -18746,6 +19058,8 @@ type VirtualMachineScaleSetExtensionProperties struct {
ProvisionAfterExtensions *[]string `json:"provisionAfterExtensions,omitempty"`
// SuppressFailures - Indicates whether failures stemming from the extension will be suppressed (Operational failures such as not connecting to the VM will not be suppressed regardless of this value). The default is false.
SuppressFailures *bool `json:"suppressFailures,omitempty"`
+ // ProtectedSettingsFromKeyVault - The extensions protected settings that are passed by reference, and consumed from key vault
+ ProtectedSettingsFromKeyVault interface{} `json:"protectedSettingsFromKeyVault,omitempty"`
}
// MarshalJSON is the custom marshaler for VirtualMachineScaleSetExtensionProperties.
@@ -18781,6 +19095,9 @@ func (vmssep VirtualMachineScaleSetExtensionProperties) MarshalJSON() ([]byte, e
if vmssep.SuppressFailures != nil {
objectMap["suppressFailures"] = vmssep.SuppressFailures
}
+ if vmssep.ProtectedSettingsFromKeyVault != nil {
+ objectMap["protectedSettingsFromKeyVault"] = vmssep.ProtectedSettingsFromKeyVault
+ }
return json.Marshal(objectMap)
}
@@ -18978,6 +19295,12 @@ func (vmsseu *VirtualMachineScaleSetExtensionUpdate) UnmarshalJSON(body []byte)
return nil
}
+// VirtualMachineScaleSetHardwareProfile specifies the hardware settings for the virtual machine scale set.
+type VirtualMachineScaleSetHardwareProfile struct {
+ // VMSizeProperties - Specifies the properties for customizing the size of the virtual machine. Minimum api-version: 2022-03-01. Please follow the instructions in [VM Customization](https://aka.ms/vmcustomization) for more details.
+ VMSizeProperties *VMSizeProperties `json:"vmSizeProperties,omitempty"`
+}
+
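Reviewer note: this new type threads VMSizeProperties into the scale-set VM profile (see the HardwareProfile field further down). A sketch; the VCPUsAvailable/VCPUsPerCore field names are assumed from the VMSizeProperties type elsewhere in this package, which this hunk doesn't show:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	hw := compute.VirtualMachineScaleSetHardwareProfile{
		VMSizeProperties: &compute.VMSizeProperties{ // field names assumed, not shown in this hunk
			VCPUsAvailable: to.Int32Ptr(2),
			VCPUsPerCore:   to.Int32Ptr(1), // 1 disables SMT where the size supports it
		},
	}
	b, _ := json.Marshal(hw)
	fmt.Println(string(b))
}
```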
// VirtualMachineScaleSetIdentity identity for the virtual machine scale set.
type VirtualMachineScaleSetIdentity struct {
// PrincipalID - READ-ONLY; The principal id of virtual machine scale set identity. This property will only be provided for a system assigned identity.
@@ -18987,7 +19310,7 @@ type VirtualMachineScaleSetIdentity struct {
// Type - The type of identity used for the virtual machine scale set. The type 'SystemAssigned, UserAssigned' includes both an implicitly created identity and a set of user assigned identities. The type 'None' will remove any identities from the virtual machine scale set. Possible values include: 'ResourceIdentityTypeSystemAssigned', 'ResourceIdentityTypeUserAssigned', 'ResourceIdentityTypeSystemAssignedUserAssigned', 'ResourceIdentityTypeNone'
Type ResourceIdentityType `json:"type,omitempty"`
// UserAssignedIdentities - The list of user identities associated with the virtual machine scale set. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
- UserAssignedIdentities map[string]*VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue `json:"userAssignedIdentities"`
+ UserAssignedIdentities map[string]*UserAssignedIdentitiesValue `json:"userAssignedIdentities"`
}
// MarshalJSON is the custom marshaler for VirtualMachineScaleSetIdentity.
@@ -19002,20 +19325,6 @@ func (vmssi VirtualMachineScaleSetIdentity) MarshalJSON() ([]byte, error) {
return json.Marshal(objectMap)
}
-// VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue ...
-type VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue struct {
- // PrincipalID - READ-ONLY; The principal id of user assigned identity.
- PrincipalID *string `json:"principalId,omitempty"`
- // ClientID - READ-ONLY; The client id of user assigned identity.
- ClientID *string `json:"clientId,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue.
-func (vmssiAiv VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
// VirtualMachineScaleSetInstanceView the instance view of a virtual machine scale set.
type VirtualMachineScaleSetInstanceView struct {
autorest.Response `json:"-"`
@@ -19127,7 +19436,7 @@ type VirtualMachineScaleSetIPConfigurationProperties struct {
Primary *bool `json:"primary,omitempty"`
// PublicIPAddressConfiguration - The publicIPAddressConfiguration.
PublicIPAddressConfiguration *VirtualMachineScaleSetPublicIPAddressConfiguration `json:"publicIPAddressConfiguration,omitempty"`
- // PrivateIPAddressVersion - Available from Api-Version 2017-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. Possible values include: 'IPVersionIPv4', 'IPVersionIPv6'
+ // PrivateIPAddressVersion - Available from Api-Version 2017-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. Possible values include: 'IPv4', 'IPv6'
PrivateIPAddressVersion IPVersion `json:"privateIPAddressVersion,omitempty"`
// ApplicationGatewayBackendAddressPools - Specifies an array of references to backend address pools of application gateways. A scale set can reference backend address pools of multiple application gateways. Multiple scale sets cannot use the same application gateway.
ApplicationGatewayBackendAddressPools *[]SubResource `json:"applicationGatewayBackendAddressPools,omitempty"`
@@ -19791,10 +20100,12 @@ func NewVirtualMachineScaleSetListWithLinkResultPage(cur VirtualMachineScaleSetL
// VirtualMachineScaleSetManagedDiskParameters describes the parameters of a ScaleSet managed disk.
type VirtualMachineScaleSetManagedDiskParameters struct {
- // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS', 'StorageAccountTypesPremiumZRS', 'StorageAccountTypesStandardSSDZRS'
+ // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS', 'StorageAccountTypesPremiumZRS', 'StorageAccountTypesStandardSSDZRS', 'StorageAccountTypesPremiumV2LRS'
StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"`
// DiskEncryptionSet - Specifies the customer managed disk encryption set resource id for the managed disk.
DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"`
+ // SecurityProfile - Specifies the security profile for the managed disk.
+ SecurityProfile *VMDiskSecurityProfile `json:"securityProfile,omitempty"`
}
// VirtualMachineScaleSetNetworkConfiguration describes a virtual machine scale set network profile's
@@ -19888,7 +20199,7 @@ type VirtualMachineScaleSetNetworkConfigurationProperties struct {
IPConfigurations *[]VirtualMachineScaleSetIPConfiguration `json:"ipConfigurations,omitempty"`
// EnableIPForwarding - Whether IP forwarding enabled on this NIC.
EnableIPForwarding *bool `json:"enableIPForwarding,omitempty"`
- // DeleteOption - Specify what happens to the network interface when the VM is deleted. Possible values include: 'DeleteOptionsDelete', 'DeleteOptionsDetach'
+ // DeleteOption - Specify what happens to the network interface when the VM is deleted. Possible values include: 'Delete', 'Detach'
DeleteOption DeleteOptions `json:"deleteOption,omitempty"`
}
@@ -19898,7 +20209,7 @@ type VirtualMachineScaleSetNetworkProfile struct {
HealthProbe *APIEntityReference `json:"healthProbe,omitempty"`
// NetworkInterfaceConfigurations - The list of network configurations.
NetworkInterfaceConfigurations *[]VirtualMachineScaleSetNetworkConfiguration `json:"networkInterfaceConfigurations,omitempty"`
- // NetworkAPIVersion - specifies the Microsoft.Network API version used when creating networking resources in the Network Interface Configurations for Virtual Machine Scale Set with orchestration mode 'Flexible'. Possible values include: 'NetworkAPIVersionTwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne'
+ // NetworkAPIVersion - specifies the Microsoft.Network API version used when creating networking resources in the Network Interface Configurations for Virtual Machine Scale Set with orchestration mode 'Flexible'. Possible values include: 'TwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne'
NetworkAPIVersion NetworkAPIVersion `json:"networkApiVersion,omitempty"`
}
@@ -19924,6 +20235,8 @@ type VirtualMachineScaleSetOSDisk struct {
VhdContainers *[]string `json:"vhdContainers,omitempty"`
// ManagedDisk - The managed disk parameters.
ManagedDisk *VirtualMachineScaleSetManagedDiskParameters `json:"managedDisk,omitempty"`
+ // DeleteOption - Specifies whether OS Disk should be deleted or detached upon VMSS Flex deletion (This feature is available for VMSS with Flexible OrchestrationMode only). Possible values: **Delete** If this value is used, the OS disk is deleted when VMSS Flex VM is deleted. **Detach** If this value is used, the OS disk is retained after VMSS Flex VM is deleted. The default value is set to **Delete**. For an Ephemeral OS Disk, the default value is set to **Delete**. User cannot change the delete option for Ephemeral OS Disk. Possible values include: 'DiskDeleteOptionTypesDelete', 'DiskDeleteOptionTypesDetach'
+ DeleteOption DiskDeleteOptionTypes `json:"deleteOption,omitempty"`
}
// VirtualMachineScaleSetOSProfile describes a virtual machine scale set OS profile.
@@ -19942,6 +20255,8 @@ type VirtualMachineScaleSetOSProfile struct {
LinuxConfiguration *LinuxConfiguration `json:"linuxConfiguration,omitempty"`
// Secrets - Specifies set of certificates that should be installed onto the virtual machines in the scale set. To install certificates on a virtual machine it is recommended to use the [Azure Key Vault virtual machine extension for Linux](https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-linux) or the [Azure Key Vault virtual machine extension for Windows](https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-windows).
Secrets *[]VaultSecretGroup `json:"secrets,omitempty"`
+ // AllowExtensionOperations - Specifies whether extension operations should be allowed on the virtual machine scale set. This may only be set to False when no extensions are present on the virtual machine scale set.
+ AllowExtensionOperations *bool `json:"allowExtensionOperations,omitempty"`
}
// VirtualMachineScaleSetProperties describes the properties of a Virtual Machine Scale Set.
@@ -19974,10 +20289,12 @@ type VirtualMachineScaleSetProperties struct {
AdditionalCapabilities *AdditionalCapabilities `json:"additionalCapabilities,omitempty"`
// ScaleInPolicy - Specifies the policies applied when scaling in Virtual Machines in the Virtual Machine Scale Set.
ScaleInPolicy *ScaleInPolicy `json:"scaleInPolicy,omitempty"`
- // OrchestrationMode - Specifies the orchestration mode for the virtual machine scale set. Possible values include: 'OrchestrationModeUniform', 'OrchestrationModeFlexible'
+ // OrchestrationMode - Specifies the orchestration mode for the virtual machine scale set. Possible values include: 'Uniform', 'Flexible'
OrchestrationMode OrchestrationMode `json:"orchestrationMode,omitempty"`
// SpotRestorePolicy - Specifies the Spot Restore properties for the virtual machine scale set.
SpotRestorePolicy *SpotRestorePolicy `json:"spotRestorePolicy,omitempty"`
+ // TimeCreated - READ-ONLY; Specifies the time at which the Virtual Machine Scale Set resource was created. Minimum api-version: 2022-03-01.
+ TimeCreated *date.Time `json:"timeCreated,omitempty"`
}
// MarshalJSON is the custom marshaler for VirtualMachineScaleSetProperties.
@@ -20112,9 +20429,9 @@ type VirtualMachineScaleSetPublicIPAddressConfigurationProperties struct {
IPTags *[]VirtualMachineScaleSetIPTag `json:"ipTags,omitempty"`
// PublicIPPrefix - The PublicIPPrefix from which to allocate publicIP addresses.
PublicIPPrefix *SubResource `json:"publicIPPrefix,omitempty"`
- // PublicIPAddressVersion - Available from Api-Version 2019-07-01 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. Possible values include: 'IPVersionIPv4', 'IPVersionIPv6'
+ // PublicIPAddressVersion - Available from Api-Version 2019-07-01 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. Possible values include: 'IPv4', 'IPv6'
PublicIPAddressVersion IPVersion `json:"publicIPAddressVersion,omitempty"`
- // DeleteOption - Specify what happens to the public IP when the VM is deleted. Possible values include: 'DeleteOptionsDelete', 'DeleteOptionsDetach'
+ // DeleteOption - Specify what happens to the public IP when the VM is deleted. Possible values include: 'Delete', 'Detach'
DeleteOption DeleteOptions `json:"deleteOption,omitempty"`
}
@@ -20982,7 +21299,7 @@ type VirtualMachineScaleSetUpdateIPConfigurationProperties struct {
Primary *bool `json:"primary,omitempty"`
// PublicIPAddressConfiguration - The publicIPAddressConfiguration.
PublicIPAddressConfiguration *VirtualMachineScaleSetUpdatePublicIPAddressConfiguration `json:"publicIPAddressConfiguration,omitempty"`
- // PrivateIPAddressVersion - Available from Api-Version 2017-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. Possible values include: 'IPVersionIPv4', 'IPVersionIPv6'
+ // PrivateIPAddressVersion - Available from Api-Version 2017-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. Possible values include: 'IPv4', 'IPv6'
PrivateIPAddressVersion IPVersion `json:"privateIPAddressVersion,omitempty"`
// ApplicationGatewayBackendAddressPools - The application gateway backend address pools.
ApplicationGatewayBackendAddressPools *[]SubResource `json:"applicationGatewayBackendAddressPools,omitempty"`
@@ -21079,7 +21396,7 @@ type VirtualMachineScaleSetUpdateNetworkConfigurationProperties struct {
IPConfigurations *[]VirtualMachineScaleSetUpdateIPConfiguration `json:"ipConfigurations,omitempty"`
// EnableIPForwarding - Whether IP forwarding enabled on this NIC.
EnableIPForwarding *bool `json:"enableIPForwarding,omitempty"`
- // DeleteOption - Specify what happens to the network interface when the VM is deleted. Possible values include: 'DeleteOptionsDelete', 'DeleteOptionsDetach'
+ // DeleteOption - Specify what happens to the network interface when the VM is deleted. Possible values include: 'Delete', 'Detach'
DeleteOption DeleteOptions `json:"deleteOption,omitempty"`
}
@@ -21089,7 +21406,7 @@ type VirtualMachineScaleSetUpdateNetworkProfile struct {
HealthProbe *APIEntityReference `json:"healthProbe,omitempty"`
// NetworkInterfaceConfigurations - The list of network configurations.
NetworkInterfaceConfigurations *[]VirtualMachineScaleSetUpdateNetworkConfiguration `json:"networkInterfaceConfigurations,omitempty"`
- // NetworkAPIVersion - specifies the Microsoft.Network API version used when creating networking resources in the Network Interface Configurations for Virtual Machine Scale Set with orchestration mode 'Flexible'. Possible values include: 'NetworkAPIVersionTwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne'
+ // NetworkAPIVersion - specifies the Microsoft.Network API version used when creating networking resources in the Network Interface Configurations for Virtual Machine Scale Set with orchestration mode 'Flexible'. Possible values include: 'TwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne'
NetworkAPIVersion NetworkAPIVersion `json:"networkApiVersion,omitempty"`
}
@@ -21108,6 +21425,8 @@ type VirtualMachineScaleSetUpdateOSDisk struct {
VhdContainers *[]string `json:"vhdContainers,omitempty"`
// ManagedDisk - The managed disk parameters.
ManagedDisk *VirtualMachineScaleSetManagedDiskParameters `json:"managedDisk,omitempty"`
+ // DeleteOption - Specifies whether OS Disk should be deleted or detached upon VMSS Flex deletion (This feature is available for VMSS with Flexible OrchestrationMode only). Possible values: **Delete** If this value is used, the OS disk is deleted when VMSS Flex VM is deleted. **Detach** If this value is used, the OS disk is retained after VMSS Flex VM is deleted. The default value is set to **Delete**. For an Ephemeral OS Disk, the default value is set to **Delete**. User cannot change the delete option for Ephemeral OS Disk. Possible values include: 'DiskDeleteOptionTypesDelete', 'DiskDeleteOptionTypesDetach'
+ DeleteOption DiskDeleteOptionTypes `json:"deleteOption,omitempty"`
}
// VirtualMachineScaleSetUpdateOSProfile describes a virtual machine scale set OS profile.
@@ -21204,7 +21523,9 @@ type VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties struct {
IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"`
// DNSSettings - The dns settings to be applied on the publicIP addresses .
DNSSettings *VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings `json:"dnsSettings,omitempty"`
- // DeleteOption - Specify what happens to the public IP when the VM is deleted. Possible values include: 'DeleteOptionsDelete', 'DeleteOptionsDetach'
+ // PublicIPPrefix - The PublicIPPrefix from which to allocate publicIP addresses.
+ PublicIPPrefix *SubResource `json:"publicIPPrefix,omitempty"`
+ // DeleteOption - Specify what happens to the public IP when the VM is deleted. Possible values include: 'Delete', 'Detach'
DeleteOption DeleteOptions `json:"deleteOption,omitempty"`
}
@@ -21256,6 +21577,8 @@ type VirtualMachineScaleSetVM struct {
Resources *[]VirtualMachineExtension `json:"resources,omitempty"`
// Zones - READ-ONLY; The virtual machine zones.
Zones *[]string `json:"zones,omitempty"`
+ // Identity - The identity of the virtual machine, if configured.
+ Identity *VirtualMachineIdentity `json:"identity,omitempty"`
// ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Resource name
@@ -21277,6 +21600,9 @@ func (vmssv VirtualMachineScaleSetVM) MarshalJSON() ([]byte, error) {
if vmssv.Plan != nil {
objectMap["plan"] = vmssv.Plan
}
+ if vmssv.Identity != nil {
+ objectMap["identity"] = vmssv.Identity
+ }
if vmssv.Location != nil {
objectMap["location"] = vmssv.Location
}
@@ -21349,6 +21675,15 @@ func (vmssv *VirtualMachineScaleSetVM) UnmarshalJSON(body []byte) error {
}
vmssv.Zones = &zones
}
+ case "identity":
+ if v != nil {
+ var identity VirtualMachineIdentity
+ err = json.Unmarshal(*v, &identity)
+ if err != nil {
+ return err
+ }
+ vmssv.Identity = &identity
+ }
case "id":
if v != nil {
var ID string
@@ -21950,7 +22285,7 @@ type VirtualMachineScaleSetVMProfile struct {
ExtensionProfile *VirtualMachineScaleSetExtensionProfile `json:"extensionProfile,omitempty"`
// LicenseType - Specifies that the image or disk that is being used was licensed on-premises. Possible values for Windows Server operating system are: Windows_Client Windows_Server Possible values for Linux Server operating system are: RHEL_BYOS (for RHEL) SLES_BYOS (for SUSE) For more information, see [Azure Hybrid Use Benefit for Windows Server](https://docs.microsoft.com/azure/virtual-machines/windows/hybrid-use-benefit-licensing) [Azure Hybrid Use Benefit for Linux Server](https://docs.microsoft.com/azure/virtual-machines/linux/azure-hybrid-benefit-linux) Minimum api-version: 2015-06-15
LicenseType *string `json:"licenseType,omitempty"`
- // Priority - Specifies the priority for the virtual machines in the scale set. Minimum api-version: 2017-10-30-preview. Possible values include: 'VirtualMachinePriorityTypesRegular', 'VirtualMachinePriorityTypesLow', 'VirtualMachinePriorityTypesSpot'
+ // Priority - Specifies the priority for the virtual machines in the scale set. Minimum api-version: 2017-10-30-preview. Possible values include: 'Regular', 'Low', 'Spot'
Priority VirtualMachinePriorityTypes `json:"priority,omitempty"`
// EvictionPolicy - Specifies the eviction policy for the Azure Spot virtual machine and Azure Spot scale set. For Azure Spot virtual machines, both 'Deallocate' and 'Delete' are supported and the minimum api-version is 2019-03-01. For Azure Spot scale sets, both 'Deallocate' and 'Delete' are supported and the minimum api-version is 2017-10-30-preview. Possible values include: 'VirtualMachineEvictionPolicyTypesDeallocate', 'VirtualMachineEvictionPolicyTypesDelete'
EvictionPolicy VirtualMachineEvictionPolicyTypes `json:"evictionPolicy,omitempty"`
@@ -21964,6 +22299,8 @@ type VirtualMachineScaleSetVMProfile struct {
CapacityReservation *CapacityReservationProfile `json:"capacityReservation,omitempty"`
// ApplicationProfile - Specifies the gallery applications that should be made available to the VM/VMSS
ApplicationProfile *ApplicationProfile `json:"applicationProfile,omitempty"`
+ // HardwareProfile - Specifies the hardware profile related details of a scale set. Minimum api-version: 2022-03-01.
+ HardwareProfile *VirtualMachineScaleSetHardwareProfile `json:"hardwareProfile,omitempty"`
}
// VirtualMachineScaleSetVMProperties describes the properties of a virtual machine scale set virtual
@@ -23350,6 +23687,15 @@ func (vmu *VirtualMachineUpdate) UnmarshalJSON(body []byte) error {
return nil
}
+// VMDiskSecurityProfile specifies the security profile settings for the managed disk. NOTE: It
+// can only be set for Confidential VMs
+type VMDiskSecurityProfile struct {
+ // SecurityEncryptionType - Specifies the EncryptionType of the managed disk. It is set to DiskWithVMGuestState for encryption of the managed disk along with VMGuestState blob, and VMGuestStateOnly for encryption of just the VMGuestState blob. NOTE: It can be set for only Confidential VMs. Possible values include: 'VMGuestStateOnly', 'DiskWithVMGuestState'
+ SecurityEncryptionType SecurityEncryptionTypes `json:"securityEncryptionType,omitempty"`
+ // DiskEncryptionSet - Specifies the customer managed disk encryption set resource id for the managed disk that is used for Customer Managed Key encrypted ConfidentialVM OS Disk and VMGuest blob.
+ DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"`
+}
+
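Reviewer note: VMDiskSecurityProfile is consumed by the new SecurityProfile field on VirtualMachineScaleSetManagedDiskParameters above. A Confidential VM OS-disk sketch; the SecurityEncryptionTypes constant name isn't visible here, so the raw string from the doc comment is converted, and the disk encryption set ID is made up:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	md := compute.VirtualMachineScaleSetManagedDiskParameters{
		StorageAccountType: compute.StorageAccountTypesPremiumLRS,
		SecurityProfile: &compute.VMDiskSecurityProfile{
			SecurityEncryptionType: compute.SecurityEncryptionTypes("DiskWithVMGuestState"), // constant name assumed
			DiskEncryptionSet: &compute.DiskEncryptionSetParameters{
				ID: to.StringPtr("/subscriptions/s/resourceGroups/rg/providers/Microsoft.Compute/diskEncryptionSets/des"),
			},
		},
	}
	b, _ := json.Marshal(md)
	fmt.Println(string(b))
}
```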
// VMGalleryApplication specifies the required information to reference a compute gallery application
// version
type VMGalleryApplication struct {
@@ -23361,6 +23707,19 @@ type VMGalleryApplication struct {
PackageReferenceID *string `json:"packageReferenceId,omitempty"`
// ConfigurationReference - Optional, Specifies the uri to an azure blob that will replace the default configuration for the package if provided
ConfigurationReference *string `json:"configurationReference,omitempty"`
+ // TreatFailureAsDeploymentFailure - Optional, If true, any failure for any operation in the VmApplication will fail the deployment
+ TreatFailureAsDeploymentFailure *bool `json:"treatFailureAsDeploymentFailure,omitempty"`
+ // EnableAutomaticUpgrade - If set to true, when a new Gallery Application version is available in PIR/SIG, it will be automatically updated for the VM/VMSS
+ EnableAutomaticUpgrade *bool `json:"enableAutomaticUpgrade,omitempty"`
+}
+
+// VMImagesInEdgeZoneListResult the List VmImages in EdgeZone operation response.
+type VMImagesInEdgeZoneListResult struct {
+ autorest.Response `json:"-"`
+ // Value - The list of VMImages in EdgeZone
+ Value *[]VirtualMachineImageResource `json:"value,omitempty"`
+ // NextLink - The URI to fetch the next page of VMImages in EdgeZone. Call ListNext() with this URI to fetch the next page of VmImages.
+ NextLink *string `json:"nextLink,omitempty"`
}
// VMScaleSetConvertToSinglePlacementGroupInput ...
@@ -23407,6 +23766,13 @@ type WindowsParameters struct {
MaxPatchPublishDate *date.Time `json:"maxPatchPublishDate,omitempty"`
}
+// WindowsVMGuestPatchAutomaticByPlatformSettings specifies additional settings to be applied when patch
+// mode AutomaticByPlatform is selected in Windows patch settings.
+type WindowsVMGuestPatchAutomaticByPlatformSettings struct {
+ // RebootSetting - Specifies the reboot setting for all AutomaticByPlatform patch installation operations. Possible values include: 'WindowsVMGuestPatchAutomaticByPlatformRebootSettingUnknown', 'WindowsVMGuestPatchAutomaticByPlatformRebootSettingIfRequired', 'WindowsVMGuestPatchAutomaticByPlatformRebootSettingNever', 'WindowsVMGuestPatchAutomaticByPlatformRebootSettingAlways'
+ RebootSetting WindowsVMGuestPatchAutomaticByPlatformRebootSetting `json:"rebootSetting,omitempty"`
+}
+
// WinRMConfiguration describes Windows Remote Management configuration of the VM
type WinRMConfiguration struct {
// Listeners - The list of Windows Remote Management listeners
@@ -23415,7 +23781,7 @@ type WinRMConfiguration struct {
// WinRMListener describes Protocol and thumbprint of Windows Remote Management listener
type WinRMListener struct {
- // Protocol - Specifies the protocol of WinRM listener. Possible values are: **http**, **https**. Possible values include: 'ProtocolTypesHTTP', 'ProtocolTypesHTTPS'
+ // Protocol - Specifies the protocol of WinRM listener. Possible values are: **http**, **https**. Possible values include: 'HTTP', 'HTTPS'
Protocol ProtocolTypes `json:"protocol,omitempty"`
// CertificateURL - This is the URL of a certificate that has been uploaded to Key Vault as a secret. For adding a secret to the Key Vault, see [Add a key or secret to the key vault](https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add). In this case, your certificate needs to be the Base64 encoding of the following JSON Object, which is encoded in UTF-8: { "data":"<Base64-encoded-certificate>", "dataType":"pfx", "password":"<pfx-file-password>" } To install certificates on a virtual machine it is recommended to use the [Azure Key Vault virtual machine extension for Linux](https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-linux) or the [Azure Key Vault virtual machine extension for Windows](https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-windows).
CertificateURL *string `json:"certificateUrl,omitempty"`
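Editor's note on the model changes above: the new `VMDiskSecurityProfile` and `DiskEncryptionSetParameters` structs are what a caller populates for a Confidential VM OS disk. A minimal sketch, not part of the upstream diff; the enum constant `SecurityEncryptionTypesDiskWithVMGuestState` is assumed to follow the SDK's usual `<Type><Value>` naming, and the disk encryption set id is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Encrypt both the managed disk and the VMGuestState blob with a
	// customer-managed key; per the doc comment, this may only be set
	// for Confidential VMs.
	profile := compute.VMDiskSecurityProfile{
		SecurityEncryptionType: compute.SecurityEncryptionTypesDiskWithVMGuestState, // assumed constant name
		DiskEncryptionSet: &compute.DiskEncryptionSetParameters{
			ID: to.StringPtr("/subscriptions/<subId>/resourceGroups/<rg>/providers/Microsoft.Compute/diskEncryptionSets/<desName>"),
		},
	}
	fmt.Println(profile.SecurityEncryptionType, to.String(profile.DiskEncryptionSet.ID))
}
```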
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/operations.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/operations.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/operations.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/operations.go
index b76ee91bfdf0..8974d4ff0812 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/operations.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/operations.go
@@ -66,7 +66,7 @@ func (client OperationsClient) List(ctx context.Context) (result OperationListRe
// ListPreparer prepares the List request.
func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/proximityplacementgroups.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/proximityplacementgroups.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/proximityplacementgroups.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/proximityplacementgroups.go
index 8144933b91db..0b13634122f8 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/proximityplacementgroups.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/proximityplacementgroups.go
@@ -77,7 +77,7 @@ func (client ProximityPlacementGroupsClient) CreateOrUpdatePreparer(ctx context.
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -155,7 +155,7 @@ func (client ProximityPlacementGroupsClient) DeletePreparer(ctx context.Context,
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -232,7 +232,7 @@ func (client ProximityPlacementGroupsClient) GetPreparer(ctx context.Context, re
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -314,7 +314,7 @@ func (client ProximityPlacementGroupsClient) ListByResourceGroupPreparer(ctx con
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -427,7 +427,7 @@ func (client ProximityPlacementGroupsClient) ListBySubscriptionPreparer(ctx cont
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -541,7 +541,7 @@ func (client ProximityPlacementGroupsClient) UpdatePreparer(ctx context.Context,
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/resourceskus.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/resourceskus.go
similarity index 100%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/resourceskus.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/resourceskus.go
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/restorepointcollections.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/restorepointcollections.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/restorepointcollections.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/restorepointcollections.go
index ea9d27484cef..d1997811b740 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/restorepointcollections.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/restorepointcollections.go
@@ -78,7 +78,7 @@ func (client RestorePointCollectionsClient) CreateOrUpdatePreparer(ctx context.C
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -150,7 +150,7 @@ func (client RestorePointCollectionsClient) DeletePreparer(ctx context.Context,
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -237,7 +237,7 @@ func (client RestorePointCollectionsClient) GetPreparer(ctx context.Context, res
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -319,7 +319,7 @@ func (client RestorePointCollectionsClient) ListPreparer(ctx context.Context, re
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -434,7 +434,7 @@ func (client RestorePointCollectionsClient) ListAllPreparer(ctx context.Context)
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -548,7 +548,7 @@ func (client RestorePointCollectionsClient) UpdatePreparer(ctx context.Context,
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/restorepoints.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/restorepoints.go
similarity index 95%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/restorepoints.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/restorepoints.go
index ad79c4b0eec5..ac171854f76e 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/restorepoints.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/restorepoints.go
@@ -95,7 +95,7 @@ func (client RestorePointsClient) CreatePreparer(ctx context.Context, resourceGr
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -178,7 +178,7 @@ func (client RestorePointsClient) DeletePreparer(ctx context.Context, resourceGr
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -223,7 +223,9 @@ func (client RestorePointsClient) DeleteResponder(resp *http.Response) (result a
// resourceGroupName - the name of the resource group.
// restorePointCollectionName - the name of the restore point collection.
// restorePointName - the name of the restore point.
-func (client RestorePointsClient) Get(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string) (result RestorePoint, err error) {
+// expand - the expand expression to apply on the operation. 'InstanceView' retrieves information about the
+// run-time state of a restore point.
+func (client RestorePointsClient) Get(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string, expand RestorePointExpandOptions) (result RestorePoint, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointsClient.Get")
defer func() {
@@ -234,7 +236,7 @@ func (client RestorePointsClient) Get(ctx context.Context, resourceGroupName str
tracing.EndSpan(ctx, sc, err)
}()
}
- req, err := client.GetPreparer(ctx, resourceGroupName, restorePointCollectionName, restorePointName)
+ req, err := client.GetPreparer(ctx, resourceGroupName, restorePointCollectionName, restorePointName, expand)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.RestorePointsClient", "Get", nil, "Failure preparing request")
return
@@ -257,7 +259,7 @@ func (client RestorePointsClient) Get(ctx context.Context, resourceGroupName str
}
// GetPreparer prepares the Get request.
-func (client RestorePointsClient) GetPreparer(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string) (*http.Request, error) {
+func (client RestorePointsClient) GetPreparer(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string, expand RestorePointExpandOptions) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"restorePointCollectionName": autorest.Encode("path", restorePointCollectionName),
@@ -265,10 +267,13 @@ func (client RestorePointsClient) GetPreparer(ctx context.Context, resourceGroup
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
+ if len(string(expand)) > 0 {
+ queryParameters["$expand"] = autorest.Encode("query", expand)
+ }
preparer := autorest.CreatePreparer(
autorest.AsGet(),
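This is a breaking signature change for consumers: `RestorePointsClient.Get` now takes a trailing `expand` argument, and the `$expand` query parameter is only sent when it is non-empty (see the `len` check in `GetPreparer` above). A hedged caller-side sketch; the constant `RestorePointExpandOptionsInstanceView` is assumed from the generator's naming convention:

```go
import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
)

// getRestorePoint shows the new call shape. Existing callers that want the
// old behavior pass the zero value, e.g. compute.RestorePointExpandOptions("").
func getRestorePoint(ctx context.Context, client compute.RestorePointsClient) (compute.RestorePoint, error) {
	return client.Get(ctx, "my-rg", "my-collection", "my-restore-point",
		compute.RestorePointExpandOptionsInstanceView) // requests the run-time instance view
}
```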
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sharedgalleries.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/sharedgalleries.go
similarity index 100%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sharedgalleries.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/sharedgalleries.go
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sharedgalleryimages.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/sharedgalleryimages.go
similarity index 100%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sharedgalleryimages.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/sharedgalleryimages.go
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sharedgalleryimageversions.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/sharedgalleryimageversions.go
similarity index 100%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sharedgalleryimageversions.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/sharedgalleryimageversions.go
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/snapshots.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/snapshots.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/snapshots.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/snapshots.go
index 3590bc688f6b..792c3d82eb53 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/snapshots.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/snapshots.go
@@ -92,7 +92,7 @@ func (client SnapshotsClient) CreateOrUpdatePreparer(ctx context.Context, resour
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -176,7 +176,7 @@ func (client SnapshotsClient) DeletePreparer(ctx context.Context, resourceGroupN
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -263,7 +263,7 @@ func (client SnapshotsClient) GetPreparer(ctx context.Context, resourceGroupName
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -341,7 +341,7 @@ func (client SnapshotsClient) GrantAccessPreparer(ctx context.Context, resourceG
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -429,7 +429,7 @@ func (client SnapshotsClient) ListPreparer(ctx context.Context) (*http.Request,
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -545,7 +545,7 @@ func (client SnapshotsClient) ListByResourceGroupPreparer(ctx context.Context, r
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -653,7 +653,7 @@ func (client SnapshotsClient) RevokeAccessPreparer(ctx context.Context, resource
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -734,7 +734,7 @@ func (client SnapshotsClient) UpdatePreparer(ctx context.Context, resourceGroupN
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-04-01"
+ const APIVersion = "2021-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sshpublickeys.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/sshpublickeys.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sshpublickeys.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/sshpublickeys.go
index 896ec1f1edbf..0e554727dae2 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sshpublickeys.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/sshpublickeys.go
@@ -76,7 +76,7 @@ func (client SSHPublicKeysClient) CreatePreparer(ctx context.Context, resourceGr
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -154,7 +154,7 @@ func (client SSHPublicKeysClient) DeletePreparer(ctx context.Context, resourceGr
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -231,7 +231,7 @@ func (client SSHPublicKeysClient) GenerateKeyPairPreparer(ctx context.Context, r
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -307,7 +307,7 @@ func (client SSHPublicKeysClient) GetPreparer(ctx context.Context, resourceGroup
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -387,7 +387,7 @@ func (client SSHPublicKeysClient) ListByResourceGroupPreparer(ctx context.Contex
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -501,7 +501,7 @@ func (client SSHPublicKeysClient) ListBySubscriptionPreparer(ctx context.Context
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -615,7 +615,7 @@ func (client SSHPublicKeysClient) UpdatePreparer(ctx context.Context, resourceGr
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/usage.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/usage.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/usage.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/usage.go
index ab20765aebf9..8adea6c2629e 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/usage.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/usage.go
@@ -86,7 +86,7 @@ func (client UsageClient) ListPreparer(ctx context.Context, location string) (*h
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/version.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/version.go
similarity index 90%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/version.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/version.go
index 56cb9d0c28d7..56165b81787c 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/version.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/version.go
@@ -10,7 +10,7 @@ import "github.com/Azure/azure-sdk-for-go/version"
// UserAgent returns the UserAgent string to use when sending http.Requests.
func UserAgent() string {
- return "Azure-SDK-For-Go/" + Version() + " compute/2021-07-01"
+ return "Azure-SDK-For-Go/" + Version() + " compute/2022-03-01"
}
// Version returns the semantic version (see http://semver.org) of the client.
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineextensionimages.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachineextensionimages.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineextensionimages.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachineextensionimages.go
index e359238e21cb..8055eded87ca 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineextensionimages.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachineextensionimages.go
@@ -77,7 +77,7 @@ func (client VirtualMachineExtensionImagesClient) GetPreparer(ctx context.Contex
"version": autorest.Encode("path", version),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -152,7 +152,7 @@ func (client VirtualMachineExtensionImagesClient) ListTypesPreparer(ctx context.
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -229,7 +229,7 @@ func (client VirtualMachineExtensionImagesClient) ListVersionsPreparer(ctx conte
"type": autorest.Encode("path", typeParameter),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineextensions.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachineextensions.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineextensions.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachineextensions.go
index a73c28d02725..bf768eca7e0e 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineextensions.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachineextensions.go
@@ -72,7 +72,7 @@ func (client VirtualMachineExtensionsClient) CreateOrUpdatePreparer(ctx context.
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -155,7 +155,7 @@ func (client VirtualMachineExtensionsClient) DeletePreparer(ctx context.Context,
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -243,7 +243,7 @@ func (client VirtualMachineExtensionsClient) GetPreparer(ctx context.Context, re
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -323,7 +323,7 @@ func (client VirtualMachineExtensionsClient) ListPreparer(ctx context.Context, r
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -398,7 +398,7 @@ func (client VirtualMachineExtensionsClient) UpdatePreparer(ctx context.Context,
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineimages.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachineimages.go
similarity index 84%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineimages.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachineimages.go
index 95b6c52e34af..8c07cf5e5f4d 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineimages.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachineimages.go
@@ -82,7 +82,7 @@ func (client VirtualMachineImagesClient) GetPreparer(ctx context.Context, locati
"version": autorest.Encode("path", version),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -163,7 +163,7 @@ func (client VirtualMachineImagesClient) ListPreparer(ctx context.Context, locat
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -203,6 +203,82 @@ func (client VirtualMachineImagesClient) ListResponder(resp *http.Response) (res
return
}
+// ListByEdgeZone gets a list of all virtual machine image versions for the specified edge zone
+// Parameters:
+// location - the name of a supported Azure region.
+// edgeZone - the name of the edge zone.
+func (client VirtualMachineImagesClient) ListByEdgeZone(ctx context.Context, location string, edgeZone string) (result VMImagesInEdgeZoneListResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineImagesClient.ListByEdgeZone")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.ListByEdgeZonePreparer(ctx, location, edgeZone)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListByEdgeZone", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByEdgeZoneSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListByEdgeZone", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListByEdgeZoneResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListByEdgeZone", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// ListByEdgeZonePreparer prepares the ListByEdgeZone request.
+func (client VirtualMachineImagesClient) ListByEdgeZonePreparer(ctx context.Context, location string, edgeZone string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "edgeZone": autorest.Encode("path", edgeZone),
+ "location": autorest.Encode("path", location),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2022-03-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/vmimages", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByEdgeZoneSender sends the ListByEdgeZone request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachineImagesClient) ListByEdgeZoneSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListByEdgeZoneResponder handles the response to the ListByEdgeZone request. The method always
+// closes the http.Response Body.
+func (client VirtualMachineImagesClient) ListByEdgeZoneResponder(resp *http.Response) (result VMImagesInEdgeZoneListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
// ListOffers gets a list of virtual machine image offers for the specified location and publisher.
// Parameters:
// location - the name of a supported Azure region.
@@ -248,7 +324,7 @@ func (client VirtualMachineImagesClient) ListOffersPreparer(ctx context.Context,
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -322,7 +398,7 @@ func (client VirtualMachineImagesClient) ListPublishersPreparer(ctx context.Cont
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -400,7 +476,7 @@ func (client VirtualMachineImagesClient) ListSkusPreparer(ctx context.Context, l
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
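The newly generated `ListByEdgeZone` above returns a single `VMImagesInEdgeZoneListResult` with no generated pager wrapping `NextLink`, so callers read `Value` directly. A minimal sketch; the region and edge-zone names are illustrative only:

```go
import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// printEdgeZoneImages lists VM image versions available in one edge zone.
func printEdgeZoneImages(ctx context.Context, client compute.VirtualMachineImagesClient) error {
	result, err := client.ListByEdgeZone(ctx, "westus", "microsoftlosangeles1")
	if err != nil {
		return err
	}
	if result.Value != nil {
		for _, img := range *result.Value {
			fmt.Println(to.String(img.Name)) // VirtualMachineImageResource carries Name/Location/ID
		}
	}
	return nil
}
```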
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineimagesedgezone.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachineimagesedgezone.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineimagesedgezone.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachineimagesedgezone.go
index 8fb48bf680d2..576a70b53287 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineimagesedgezone.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachineimagesedgezone.go
@@ -84,7 +84,7 @@ func (client VirtualMachineImagesEdgeZoneClient) GetPreparer(ctx context.Context
"version": autorest.Encode("path", version),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -170,7 +170,7 @@ func (client VirtualMachineImagesEdgeZoneClient) ListPreparer(ctx context.Contex
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -257,7 +257,7 @@ func (client VirtualMachineImagesEdgeZoneClient) ListOffersPreparer(ctx context.
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -333,7 +333,7 @@ func (client VirtualMachineImagesEdgeZoneClient) ListPublishersPreparer(ctx cont
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -413,7 +413,7 @@ func (client VirtualMachineImagesEdgeZoneClient) ListSkusPreparer(ctx context.Co
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineruncommands.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachineruncommands.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineruncommands.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachineruncommands.go
index 5a2f07fafb75..3935fd45a07d 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineruncommands.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachineruncommands.go
@@ -73,7 +73,7 @@ func (client VirtualMachineRunCommandsClient) CreateOrUpdatePreparer(ctx context
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -156,7 +156,7 @@ func (client VirtualMachineRunCommandsClient) DeletePreparer(ctx context.Context
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -247,7 +247,7 @@ func (client VirtualMachineRunCommandsClient) GetPreparer(ctx context.Context, l
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -326,7 +326,7 @@ func (client VirtualMachineRunCommandsClient) GetByVirtualMachinePreparer(ctx co
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -414,7 +414,7 @@ func (client VirtualMachineRunCommandsClient) ListPreparer(ctx context.Context,
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -533,7 +533,7 @@ func (client VirtualMachineRunCommandsClient) ListByVirtualMachinePreparer(ctx c
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -645,7 +645,7 @@ func (client VirtualMachineRunCommandsClient) UpdatePreparer(ctx context.Context
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachines.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachines.go
similarity index 97%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachines.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachines.go
index d1aff5e7573b..934842c07016 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachines.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachines.go
@@ -69,7 +69,7 @@ func (client VirtualMachinesClient) AssessPatchesPreparer(ctx context.Context, r
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -158,7 +158,7 @@ func (client VirtualMachinesClient) CapturePreparer(ctx context.Context, resourc
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -240,7 +240,7 @@ func (client VirtualMachinesClient) ConvertToManagedDisksPreparer(ctx context.Co
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -341,7 +341,7 @@ func (client VirtualMachinesClient) CreateOrUpdatePreparer(ctx context.Context,
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -425,7 +425,7 @@ func (client VirtualMachinesClient) DeallocatePreparer(ctx context.Context, reso
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -472,7 +472,7 @@ func (client VirtualMachinesClient) DeallocateResponder(resp *http.Response) (re
// Parameters:
// resourceGroupName - the name of the resource group.
// VMName - the name of the virtual machine.
-// forceDeletion - optional parameter to force delete virtual machines.(Feature in Preview)
+// forceDeletion - optional parameter to force delete virtual machines.
func (client VirtualMachinesClient) Delete(ctx context.Context, resourceGroupName string, VMName string, forceDeletion *bool) (result VirtualMachinesDeleteFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.Delete")
@@ -507,7 +507,7 @@ func (client VirtualMachinesClient) DeletePreparer(ctx context.Context, resource
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -599,7 +599,7 @@ func (client VirtualMachinesClient) GeneralizePreparer(ctx context.Context, reso
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -678,7 +678,7 @@ func (client VirtualMachinesClient) GetPreparer(ctx context.Context, resourceGro
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -751,7 +751,7 @@ func (client VirtualMachinesClient) InstallPatchesPreparer(ctx context.Context,
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -839,7 +839,7 @@ func (client VirtualMachinesClient) InstanceViewPreparer(ctx context.Context, re
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -874,7 +874,10 @@ func (client VirtualMachinesClient) InstanceViewResponder(resp *http.Response) (
// get the next page of virtual machines.
// Parameters:
// resourceGroupName - the name of the resource group.
-func (client VirtualMachinesClient) List(ctx context.Context, resourceGroupName string) (result VirtualMachineListResultPage, err error) {
+// filter - the system query option to filter VMs returned in the response. Allowed value is
+// 'virtualMachineScaleSet/id' eq
+// /subscriptions/{subId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmssName}'
+func (client VirtualMachinesClient) List(ctx context.Context, resourceGroupName string, filter string) (result VirtualMachineListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.List")
defer func() {
@@ -886,7 +889,7 @@ func (client VirtualMachinesClient) List(ctx context.Context, resourceGroupName
}()
}
result.fn = client.listNextResults
- req, err := client.ListPreparer(ctx, resourceGroupName)
+ req, err := client.ListPreparer(ctx, resourceGroupName, filter)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", nil, "Failure preparing request")
return
@@ -913,16 +916,19 @@ func (client VirtualMachinesClient) List(ctx context.Context, resourceGroupName
}
// ListPreparer prepares the List request.
-func (client VirtualMachinesClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
+func (client VirtualMachinesClient) ListPreparer(ctx context.Context, resourceGroupName string, filter string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
preparer := autorest.CreatePreparer(
autorest.AsGet(),
@@ -972,7 +978,7 @@ func (client VirtualMachinesClient) listNextResults(ctx context.Context, lastRes
}
// ListComplete enumerates all values, automatically crossing page boundaries as required.
-func (client VirtualMachinesClient) ListComplete(ctx context.Context, resourceGroupName string) (result VirtualMachineListResultIterator, err error) {
+func (client VirtualMachinesClient) ListComplete(ctx context.Context, resourceGroupName string, filter string) (result VirtualMachineListResultIterator, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.List")
defer func() {
@@ -983,7 +989,7 @@ func (client VirtualMachinesClient) ListComplete(ctx context.Context, resourceGr
tracing.EndSpan(ctx, sc, err)
}()
}
- result.page, err = client.List(ctx, resourceGroupName)
+ result.page, err = client.List(ctx, resourceGroupName, filter)
return
}
@@ -991,7 +997,10 @@ func (client VirtualMachinesClient) ListComplete(ctx context.Context, resourceGr
// to get the next page of virtual machines.
// Parameters:
// statusOnly - statusOnly=true enables fetching run time status of all Virtual Machines in the subscription.
-func (client VirtualMachinesClient) ListAll(ctx context.Context, statusOnly string) (result VirtualMachineListResultPage, err error) {
+// filter - the system query option to filter VMs returned in the response. Allowed value is
+// 'virtualMachineScaleSet/id' eq
+// /subscriptions/{subId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmssName}'
+func (client VirtualMachinesClient) ListAll(ctx context.Context, statusOnly string, filter string) (result VirtualMachineListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.ListAll")
defer func() {
@@ -1003,7 +1012,7 @@ func (client VirtualMachinesClient) ListAll(ctx context.Context, statusOnly stri
}()
}
result.fn = client.listAllNextResults
- req, err := client.ListAllPreparer(ctx, statusOnly)
+ req, err := client.ListAllPreparer(ctx, statusOnly, filter)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", nil, "Failure preparing request")
return
@@ -1030,18 +1039,21 @@ func (client VirtualMachinesClient) ListAll(ctx context.Context, statusOnly stri
}
// ListAllPreparer prepares the ListAll request.
-func (client VirtualMachinesClient) ListAllPreparer(ctx context.Context, statusOnly string) (*http.Request, error) {
+func (client VirtualMachinesClient) ListAllPreparer(ctx context.Context, statusOnly string, filter string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
if len(statusOnly) > 0 {
queryParameters["statusOnly"] = autorest.Encode("query", statusOnly)
}
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
preparer := autorest.CreatePreparer(
autorest.AsGet(),
@@ -1091,7 +1103,7 @@ func (client VirtualMachinesClient) listAllNextResults(ctx context.Context, last
}
// ListAllComplete enumerates all values, automatically crossing page boundaries as required.
-func (client VirtualMachinesClient) ListAllComplete(ctx context.Context, statusOnly string) (result VirtualMachineListResultIterator, err error) {
+func (client VirtualMachinesClient) ListAllComplete(ctx context.Context, statusOnly string, filter string) (result VirtualMachineListResultIterator, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.ListAll")
defer func() {
@@ -1102,7 +1114,7 @@ func (client VirtualMachinesClient) ListAllComplete(ctx context.Context, statusO
tracing.EndSpan(ctx, sc, err)
}()
}
- result.page, err = client.ListAll(ctx, statusOnly)
+ result.page, err = client.ListAll(ctx, statusOnly, filter)
return
}
@@ -1151,7 +1163,7 @@ func (client VirtualMachinesClient) ListAvailableSizesPreparer(ctx context.Conte
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -1236,7 +1248,7 @@ func (client VirtualMachinesClient) ListByLocationPreparer(ctx context.Context,
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -1342,7 +1354,7 @@ func (client VirtualMachinesClient) PerformMaintenancePreparer(ctx context.Conte
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -1424,7 +1436,7 @@ func (client VirtualMachinesClient) PowerOffPreparer(ctx context.Context, resour
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -1507,7 +1519,7 @@ func (client VirtualMachinesClient) ReapplyPreparer(ctx context.Context, resourc
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -1585,7 +1597,7 @@ func (client VirtualMachinesClient) RedeployPreparer(ctx context.Context, resour
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -1664,7 +1676,7 @@ func (client VirtualMachinesClient) ReimagePreparer(ctx context.Context, resourc
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -1747,7 +1759,7 @@ func (client VirtualMachinesClient) RestartPreparer(ctx context.Context, resourc
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -1835,7 +1847,7 @@ func (client VirtualMachinesClient) RetrieveBootDiagnosticsDataPreparer(ctx cont
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -1914,7 +1926,7 @@ func (client VirtualMachinesClient) RunCommandPreparer(ctx context.Context, reso
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -2002,7 +2014,7 @@ func (client VirtualMachinesClient) SimulateEvictionPreparer(ctx context.Context
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -2070,7 +2082,7 @@ func (client VirtualMachinesClient) StartPreparer(ctx context.Context, resourceG
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -2149,7 +2161,7 @@ func (client VirtualMachinesClient) UpdatePreparer(ctx context.Context, resource
"vmName": autorest.Encode("path", VMName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
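`List`, `ListComplete`, `ListAll`, and `ListAllComplete` all gained a trailing `filter` argument in this hunk, so every existing call site must be touched even when no filtering is wanted. A sketch of both call shapes; the filter grammar follows the doc comment above, and the VMSS resource id is a made-up example:

```go
import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
)

// listVMs demonstrates the new signature of VirtualMachinesClient.List.
func listVMs(ctx context.Context, client compute.VirtualMachinesClient) error {
	// Unfiltered: the empty string suppresses the $filter query parameter.
	if _, err := client.List(ctx, "my-rg", ""); err != nil {
		return err
	}

	// Filtered: restrict the response to VMs backed by one scale set.
	filter := "'virtualMachineScaleSet/id' eq '/subscriptions/<subId>/resourceGroups/my-rg/providers/Microsoft.Compute/virtualMachineScaleSets/my-vmss'"
	_, err := client.List(ctx, "my-rg", filter)
	return err
}
```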
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetextensions.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachinescalesetextensions.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetextensions.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachinescalesetextensions.go
index 9e71f3ba9100..d9b42d118360 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetextensions.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachinescalesetextensions.go
@@ -72,7 +72,7 @@ func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdatePreparer(ctx
"vmssExtensionName": autorest.Encode("path", vmssExtensionName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -156,7 +156,7 @@ func (client VirtualMachineScaleSetExtensionsClient) DeletePreparer(ctx context.
"vmssExtensionName": autorest.Encode("path", vmssExtensionName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -244,7 +244,7 @@ func (client VirtualMachineScaleSetExtensionsClient) GetPreparer(ctx context.Con
"vmssExtensionName": autorest.Encode("path", vmssExtensionName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -328,7 +328,7 @@ func (client VirtualMachineScaleSetExtensionsClient) ListPreparer(ctx context.Co
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -437,7 +437,7 @@ func (client VirtualMachineScaleSetExtensionsClient) UpdatePreparer(ctx context.
"vmssExtensionName": autorest.Encode("path", vmssExtensionName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetrollingupgrades.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachinescalesetrollingupgrades.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetrollingupgrades.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachinescalesetrollingupgrades.go
index be9de5194bb5..a1b349866ced 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetrollingupgrades.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachinescalesetrollingupgrades.go
@@ -70,7 +70,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) CancelPreparer(ctx con
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -155,7 +155,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestPreparer(ctx
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -226,7 +226,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) StartExtensionUpgradeP
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -305,7 +305,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradePreparer
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesets.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachinescalesets.go
similarity index 98%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesets.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachinescalesets.go
index 28523a860773..5e8af862fe83 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesets.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachinescalesets.go
@@ -79,7 +79,7 @@ func (client VirtualMachineScaleSetsClient) ConvertToSinglePlacementGroupPrepare
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -173,7 +173,7 @@ func (client VirtualMachineScaleSetsClient) CreateOrUpdatePreparer(ctx context.C
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -256,7 +256,7 @@ func (client VirtualMachineScaleSetsClient) DeallocatePreparer(ctx context.Conte
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -340,7 +340,7 @@ func (client VirtualMachineScaleSetsClient) DeletePreparer(ctx context.Context,
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -430,7 +430,7 @@ func (client VirtualMachineScaleSetsClient) DeleteInstancesPreparer(ctx context.
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -481,7 +481,9 @@ func (client VirtualMachineScaleSetsClient) DeleteInstancesResponder(resp *http.
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
// platformUpdateDomain - the platform update domain for which a manual recovery walk is requested
-func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUpdateDomainWalk(ctx context.Context, resourceGroupName string, VMScaleSetName string, platformUpdateDomain int32) (result RecoveryWalkResponse, err error) {
+// zone - the zone in which the manual recovery walk is requested for cross zone virtual machine scale set
+// placementGroupID - the placement group id for which the manual recovery walk is requested.
+func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUpdateDomainWalk(ctx context.Context, resourceGroupName string, VMScaleSetName string, platformUpdateDomain int32, zone string, placementGroupID string) (result RecoveryWalkResponse, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.ForceRecoveryServiceFabricPlatformUpdateDomainWalk")
defer func() {
@@ -492,7 +494,7 @@ func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUp
tracing.EndSpan(ctx, sc, err)
}()
}
- req, err := client.ForceRecoveryServiceFabricPlatformUpdateDomainWalkPreparer(ctx, resourceGroupName, VMScaleSetName, platformUpdateDomain)
+ req, err := client.ForceRecoveryServiceFabricPlatformUpdateDomainWalkPreparer(ctx, resourceGroupName, VMScaleSetName, platformUpdateDomain, zone, placementGroupID)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ForceRecoveryServiceFabricPlatformUpdateDomainWalk", nil, "Failure preparing request")
return
@@ -515,18 +517,24 @@ func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUp
}
// ForceRecoveryServiceFabricPlatformUpdateDomainWalkPreparer prepares the ForceRecoveryServiceFabricPlatformUpdateDomainWalk request.
-func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUpdateDomainWalkPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, platformUpdateDomain int32) (*http.Request, error) {
+func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUpdateDomainWalkPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, platformUpdateDomain int32, zone string, placementGroupID string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
"platformUpdateDomain": autorest.Encode("query", platformUpdateDomain),
}
+ if len(zone) > 0 {
+ queryParameters["zone"] = autorest.Encode("query", zone)
+ }
+ if len(placementGroupID) > 0 {
+ queryParameters["placementGroupId"] = autorest.Encode("query", placementGroupID)
+ }
preparer := autorest.CreatePreparer(
autorest.AsPost(),
@@ -601,7 +609,7 @@ func (client VirtualMachineScaleSetsClient) GetPreparer(ctx context.Context, res
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -680,7 +688,7 @@ func (client VirtualMachineScaleSetsClient) GetInstanceViewPreparer(ctx context.
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -761,7 +769,7 @@ func (client VirtualMachineScaleSetsClient) GetOSUpgradeHistoryPreparer(ctx cont
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -877,7 +885,7 @@ func (client VirtualMachineScaleSetsClient) ListPreparer(ctx context.Context, re
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -992,7 +1000,7 @@ func (client VirtualMachineScaleSetsClient) ListAllPreparer(ctx context.Context)
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -1114,7 +1122,7 @@ func (client VirtualMachineScaleSetsClient) ListByLocationPreparer(ctx context.C
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -1233,7 +1241,7 @@ func (client VirtualMachineScaleSetsClient) ListSkusPreparer(ctx context.Context
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -1342,7 +1350,7 @@ func (client VirtualMachineScaleSetsClient) PerformMaintenancePreparer(ctx conte
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -1430,7 +1438,7 @@ func (client VirtualMachineScaleSetsClient) PowerOffPreparer(ctx context.Context
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -1520,7 +1528,7 @@ func (client VirtualMachineScaleSetsClient) RedeployPreparer(ctx context.Context
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -1605,7 +1613,7 @@ func (client VirtualMachineScaleSetsClient) ReimagePreparer(ctx context.Context,
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -1690,7 +1698,7 @@ func (client VirtualMachineScaleSetsClient) ReimageAllPreparer(ctx context.Conte
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -1774,7 +1782,7 @@ func (client VirtualMachineScaleSetsClient) RestartPreparer(ctx context.Context,
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -1858,7 +1866,7 @@ func (client VirtualMachineScaleSetsClient) SetOrchestrationServiceStatePreparer
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -1939,7 +1947,7 @@ func (client VirtualMachineScaleSetsClient) StartPreparer(ctx context.Context, r
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -2023,7 +2031,7 @@ func (client VirtualMachineScaleSetsClient) UpdatePreparer(ctx context.Context,
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -2111,7 +2119,7 @@ func (client VirtualMachineScaleSetsClient) UpdateInstancesPreparer(ctx context.
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
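Aside from the api-version bumps, the one signature break in this file is ForceRecoveryServiceFabricPlatformUpdateDomainWalk, which now threads zone and placementGroupID through to the query string. A minimal caller-side sketch, not part of the vendored patch (the client value and resource names are hypothetical), showing that empty strings keep the old request shape, because the preparer only appends those parameters when non-empty:

package vmssexample

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
)

func forceRecoveryWalk(ctx context.Context, client compute.VirtualMachineScaleSetsClient) error {
	// Empty zone and placementGroupID are skipped by the preparer, so this call
	// stays wire-compatible with the old four-parameter form.
	_, err := client.ForceRecoveryServiceFabricPlatformUpdateDomainWalk(
		ctx, "my-resource-group", "my-vmss", 0, "", "")
	return err
}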
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvmextensions.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachinescalesetvmextensions.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvmextensions.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachinescalesetvmextensions.go
index 9713046e25d8..37b146255709 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvmextensions.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachinescalesetvmextensions.go
@@ -75,7 +75,7 @@ func (client VirtualMachineScaleSetVMExtensionsClient) CreateOrUpdatePreparer(ct
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -162,7 +162,7 @@ func (client VirtualMachineScaleSetVMExtensionsClient) DeletePreparer(ctx contex
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -252,7 +252,7 @@ func (client VirtualMachineScaleSetVMExtensionsClient) GetPreparer(ctx context.C
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -334,7 +334,7 @@ func (client VirtualMachineScaleSetVMExtensionsClient) ListPreparer(ctx context.
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -411,7 +411,7 @@ func (client VirtualMachineScaleSetVMExtensionsClient) UpdatePreparer(ctx contex
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvmruncommands.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachinescalesetvmruncommands.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvmruncommands.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachinescalesetvmruncommands.go
index 4fbb0c104507..34f298e84833 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvmruncommands.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachinescalesetvmruncommands.go
@@ -75,7 +75,7 @@ func (client VirtualMachineScaleSetVMRunCommandsClient) CreateOrUpdatePreparer(c
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -160,7 +160,7 @@ func (client VirtualMachineScaleSetVMRunCommandsClient) DeletePreparer(ctx conte
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -250,7 +250,7 @@ func (client VirtualMachineScaleSetVMRunCommandsClient) GetPreparer(ctx context.
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -337,7 +337,7 @@ func (client VirtualMachineScaleSetVMRunCommandsClient) ListPreparer(ctx context
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -451,7 +451,7 @@ func (client VirtualMachineScaleSetVMRunCommandsClient) UpdatePreparer(ctx conte
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvms.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachinescalesetvms.go
similarity index 98%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvms.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachinescalesetvms.go
index f46f6c4f52f1..f155885f9bc6 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvms.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachinescalesetvms.go
@@ -74,7 +74,7 @@ func (client VirtualMachineScaleSetVMsClient) DeallocatePreparer(ctx context.Con
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -82,7 +82,7 @@ func (client VirtualMachineScaleSetVMsClient) DeallocatePreparer(ctx context.Con
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/deallocate", pathParameters),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/deallocate", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
@@ -156,7 +156,7 @@ func (client VirtualMachineScaleSetVMsClient) DeletePreparer(ctx context.Context
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -167,7 +167,7 @@ func (client VirtualMachineScaleSetVMsClient) DeletePreparer(ctx context.Context
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}", pathParameters),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
@@ -248,7 +248,7 @@ func (client VirtualMachineScaleSetVMsClient) GetPreparer(ctx context.Context, r
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -259,7 +259,7 @@ func (client VirtualMachineScaleSetVMsClient) GetPreparer(ctx context.Context, r
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}", pathParameters),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
@@ -329,7 +329,7 @@ func (client VirtualMachineScaleSetVMsClient) GetInstanceViewPreparer(ctx contex
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -337,7 +337,7 @@ func (client VirtualMachineScaleSetVMsClient) GetInstanceViewPreparer(ctx contex
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/instanceView", pathParameters),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/instanceView", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
@@ -414,7 +414,7 @@ func (client VirtualMachineScaleSetVMsClient) ListPreparer(ctx context.Context,
"virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -531,7 +531,7 @@ func (client VirtualMachineScaleSetVMsClient) PerformMaintenancePreparer(ctx con
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -615,7 +615,7 @@ func (client VirtualMachineScaleSetVMsClient) PowerOffPreparer(ctx context.Conte
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -701,7 +701,7 @@ func (client VirtualMachineScaleSetVMsClient) RedeployPreparer(ctx context.Conte
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -782,7 +782,7 @@ func (client VirtualMachineScaleSetVMsClient) ReimagePreparer(ctx context.Contex
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -791,7 +791,7 @@ func (client VirtualMachineScaleSetVMsClient) ReimagePreparer(ctx context.Contex
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/reimage", pathParameters),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/reimage", pathParameters),
autorest.WithQueryParameters(queryParameters))
if VMScaleSetVMReimageInput != nil {
preparer = autorest.DecoratePreparer(preparer,
@@ -868,7 +868,7 @@ func (client VirtualMachineScaleSetVMsClient) ReimageAllPreparer(ctx context.Con
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -876,7 +876,7 @@ func (client VirtualMachineScaleSetVMsClient) ReimageAllPreparer(ctx context.Con
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/reimageall", pathParameters),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/reimageall", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
@@ -948,7 +948,7 @@ func (client VirtualMachineScaleSetVMsClient) RestartPreparer(ctx context.Contex
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -1039,7 +1039,7 @@ func (client VirtualMachineScaleSetVMsClient) RetrieveBootDiagnosticsDataPrepare
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -1120,7 +1120,7 @@ func (client VirtualMachineScaleSetVMsClient) RunCommandPreparer(ctx context.Con
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -1210,7 +1210,7 @@ func (client VirtualMachineScaleSetVMsClient) SimulateEvictionPreparer(ctx conte
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -1280,7 +1280,7 @@ func (client VirtualMachineScaleSetVMsClient) StartPreparer(ctx context.Context,
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -1382,7 +1382,7 @@ func (client VirtualMachineScaleSetVMsClient) UpdatePreparer(ctx context.Context
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -1395,7 +1395,7 @@ func (client VirtualMachineScaleSetVMsClient) UpdatePreparer(ctx context.Context
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}", pathParameters),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
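Beyond the api-version constant, the instance-scoped routes in this file switch from the lowercase "virtualmachines" segment to the camel-cased "virtualMachines" one. A sketch under stated assumptions (placeholder IDs, public-cloud endpoint) of the corrected preparer, built with the same autorest helpers the generated code uses:

package vmssvmexample

import (
	"context"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func buildGetInstanceRequest(ctx context.Context) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"subscriptionId":    autorest.Encode("path", "00000000-0000-0000-0000-000000000000"), // placeholder
		"resourceGroupName": autorest.Encode("path", "my-rg"),   // placeholder
		"vmScaleSetName":    autorest.Encode("path", "my-vmss"), // placeholder
		"instanceId":        autorest.Encode("path", "0"),
	}
	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL("https://management.azure.com"),
		// Note the camel-cased "virtualMachines" segment introduced by this rename.
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}", pathParameters),
		autorest.WithQueryParameters(map[string]interface{}{"api-version": "2022-03-01"}))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}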
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinesizes.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachinesizes.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinesizes.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachinesizes.go
index 394d659c218c..90a976ce0ea5 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinesizes.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute/virtualmachinesizes.go
@@ -81,7 +81,7 @@ func (client VirtualMachineSizesClient) ListPreparer(ctx context.Context, locati
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2021-07-01"
+ const APIVersion = "2022-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/client.go
index 79937e123765..f329deac33c0 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/client.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/client.go
@@ -1,3 +1,5 @@
+// Deprecated: Please note, this package has been deprecated. A replacement package is available [github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry). We strongly encourage you to upgrade to continue receiving updates. See [Migration Guide](https://aka.ms/azsdk/golang/t2/migration) for guidance on upgrading. Refer to our [deprecation policy](https://azure.github.io/azure-sdk/policies_support.html) for more details.
+//
// Package containerregistry implements the Azure ARM Containerregistry service API version .
//
//
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-04-01/containerservice/client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-04-01/containerservice/client.go
index 0720193b7aaa..ce1a6043a98b 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-04-01/containerservice/client.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-04-01/containerservice/client.go
@@ -1,3 +1,5 @@
+// Deprecated: Please note, this package has been deprecated. A replacement package is available [github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice). We strongly encourage you to upgrade to continue receiving updates. See [Migration Guide](https://aka.ms/azsdk/golang/t2/migration) for guidance on upgrading. Refer to our [deprecation policy](https://azure.github.io/azure-sdk/policies_support.html) for more details.
+//
// Package containerservice implements the Azure ARM Containerservice service API version .
//
// The Container Service Client.
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-10-01/containerservice/client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-10-01/containerservice/client.go
index 7b32e332fd4c..7808f5a0493d 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-10-01/containerservice/client.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-10-01/containerservice/client.go
@@ -1,3 +1,5 @@
+// Deprecated: Please note, this package has been deprecated. A replacement package is available [github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice). We strongly encourage you to upgrade to continue receiving updates. See [Migration Guide](https://aka.ms/azsdk/golang/t2/migration) for guidance on upgrading. Refer to our [deprecation policy](https://azure.github.io/azure-sdk/policies_support.html) for more details.
+//
// Package containerservice implements the Azure ARM Containerservice service API version 2021-10-01.
//
// The Container Service Client.
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/client.go
index 535f4ca3e282..a9bf2394a295 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/client.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/client.go
@@ -1,3 +1,5 @@
+// Deprecated: Please note, this package has been deprecated. A replacement package is available [github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork). We strongly encourage you to upgrade to continue receiving updates. See [Migration Guide](https://aka.ms/azsdk/golang/t2/migration) for guidance on upgrading. Refer to our [deprecation policy](https://azure.github.io/azure-sdk/policies_support.html) for more details.
+//
// Package network implements the Azure ARM Network service API version .
//
// Network Client
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-08-01/network/CHANGELOG.md b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-08-01/network/CHANGELOG.md
index 93fd201bfc3e..52911e4cc5e4 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-08-01/network/CHANGELOG.md
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-08-01/network/CHANGELOG.md
@@ -1,9 +1,2 @@
# Change History
-## Additive Changes
-
-### Struct Changes
-
-#### New Struct Fields
-
-1. ApplicationGatewayRoutingRulePropertiesFormat.Priority
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-08-01/network/_meta.json b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-08-01/network/_meta.json
index 9b636be005b5..04957b5c7609 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-08-01/network/_meta.json
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-08-01/network/_meta.json
@@ -1,10 +1,10 @@
{
- "commit": "a8a52b9e6c305f03c3a4c5411d59fc4454b5b372",
+ "commit": "1c8d7850afbec9ede6de6f2d14bcc30896a74ed6",
"readme": "/_/azure-rest-api-specs/specification/network/resource-manager/readme.md",
"tag": "package-2021-08",
- "use": "@microsoft.azure/autorest.go@2.1.187",
+ "use": "@microsoft.azure/autorest.go@2.1.188",
"repository_url": "https://github.com/Azure/azure-rest-api-specs.git",
- "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-2021-08 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix /_/azure-rest-api-specs/specification/network/resource-manager/readme.md",
+ "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.188 --tag=package-2021-08 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix /_/azure-rest-api-specs/specification/network/resource-manager/readme.md",
"additional_properties": {
"additional_options": "--go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix"
}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-08-01/network/client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-08-01/network/client.go
index 47c958e96bb1..b8d584889611 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-08-01/network/client.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-08-01/network/client.go
@@ -1,3 +1,5 @@
+// Deprecated: Please note, this package has been deprecated. A replacement package is available [github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork). We strongly encourage you to upgrade to continue receiving updates. See [Migration Guide](https://aka.ms/azsdk/golang/t2/migration) for guidance on upgrading. Refer to our [deprecation policy](https://azure.github.io/azure-sdk/policies_support.html) for more details.
+//
// Package network implements the Azure ARM Network service API version 2021-08-01.
//
// Network Client
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/client.go
index 48bd23da58c1..b01b463772e6 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/client.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/client.go
@@ -1,3 +1,5 @@
+// Deprecated: Please note, this package has been deprecated. A replacement package is available [github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns). We strongly encourage you to upgrade to continue receiving updates. See [Migration Guide](https://aka.ms/azsdk/golang/t2/migration) for guidance on upgrading. Refer to our [deprecation policy](https://azure.github.io/azure-sdk/policies_support.html) for more details.
+//
// Package privatedns implements the Azure ARM Privatedns service API version 2018-09-01.
//
// The Private DNS Management Client.
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/client.go
index 68623ec9cc91..8a7064e5995f 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/client.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/client.go
@@ -1,3 +1,5 @@
+// Deprecated: Please note, this package has been deprecated. A replacement package is available [github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources). We strongly encourage you to upgrade to continue receiving updates. See [Migration Guide](https://aka.ms/azsdk/golang/t2/migration) for guidance on upgrading. Refer to our [deprecation policy](https://azure.github.io/azure-sdk/policies_support.html) for more details.
+//
// Package resources implements the Azure ARM Resources service API version 2017-05-10.
//
// Provides operations for working with resources and resource groups.
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/client.go
index 689b44529880..a61c8d456843 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/client.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/client.go
@@ -1,3 +1,5 @@
+// Deprecated: Please note, this package has been deprecated. A replacement package is available [github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage). We strongly encourage you to upgrade to continue receiving updates. See [Migration Guide](https://aka.ms/azsdk/golang/t2/migration) for guidance on upgrading. Refer to our [deprecation policy](https://azure.github.io/azure-sdk/policies_support.html) for more details.
+//
// Package storage implements the Azure ARM Storage service API version 2019-06-01.
//
// The Azure Storage Management API.
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/client.go
index cb5dc8282ae3..71aa87b9192e 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/client.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/client.go
@@ -1,3 +1,5 @@
+// Deprecated: Please note, this package has been deprecated. A replacement package is available [github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage). We strongly encourage you to upgrade to continue receiving updates. See [Migration Guide](https://aka.ms/azsdk/golang/t2/migration) for guidance on upgrading. Refer to our [deprecation policy](https://azure.github.io/azure-sdk/policies_support.html) for more details.
+//
// Package storage implements the Azure ARM Storage service API version 2021-02-01.
//
// The Azure Storage Management API.
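Each of these deprecation headers points at a track-2 replacement module. A hedged migration sketch for the storage case; the constructor and credential follow the documented track-2 pattern, but nothing in this PR pins the replacement module, so treat the import paths and options as assumptions:

package storagemigration

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage"
)

func newTrack2AccountsClient(subscriptionID string) (*armstorage.AccountsClient, error) {
	// DefaultAzureCredential chains environment, managed identity, and CLI auth.
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		return nil, err
	}
	// nil options target the public cloud, mirroring NewAccountsClient in the
	// deprecated track-1 package above.
	return armstorage.NewAccountsClient(subscriptionID, cred, nil)
}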
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/CHANGELOG.md b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/CHANGELOG.md
new file mode 100644
index 000000000000..52911e4cc5e4
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/CHANGELOG.md
@@ -0,0 +1,2 @@
+# Change History
+
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/_meta.json b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/_meta.json
new file mode 100644
index 000000000000..8eabeb9334d4
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/_meta.json
@@ -0,0 +1,11 @@
+{
+ "commit": "1c8d7850afbec9ede6de6f2d14bcc30896a74ed6",
+ "readme": "/_/azure-rest-api-specs/specification/storage/resource-manager/readme.md",
+ "tag": "package-2021-09",
+ "use": "@microsoft.azure/autorest.go@2.1.188",
+ "repository_url": "https://github.com/Azure/azure-rest-api-specs.git",
+ "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.188 --tag=package-2021-09 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix /_/azure-rest-api-specs/specification/storage/resource-manager/readme.md",
+ "additional_properties": {
+ "additional_options": "--go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix"
+ }
+}
\ No newline at end of file
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/accounts.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/accounts.go
new file mode 100644
index 000000000000..53d37ea455ac
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/accounts.go
@@ -0,0 +1,1642 @@
+package storage
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// AccountsClient is the Azure Storage Management API.
+type AccountsClient struct {
+ BaseClient
+}
+
+// NewAccountsClient creates an instance of the AccountsClient client.
+func NewAccountsClient(subscriptionID string) AccountsClient {
+ return NewAccountsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewAccountsClientWithBaseURI creates an instance of the AccountsClient client using a custom endpoint. Use this
+// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
+func NewAccountsClientWithBaseURI(baseURI string, subscriptionID string) AccountsClient {
+ return AccountsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// AbortHierarchicalNamespaceMigration aborts a live migration of the storage account to enable HNS (hierarchical namespace)
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client AccountsClient) AbortHierarchicalNamespaceMigration(ctx context.Context, resourceGroupName string, accountName string) (result AccountsAbortHierarchicalNamespaceMigrationFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.AbortHierarchicalNamespaceMigration")
+ defer func() {
+ sc := -1
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "AbortHierarchicalNamespaceMigration", err.Error())
+ }
+
+ req, err := client.AbortHierarchicalNamespaceMigrationPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "AbortHierarchicalNamespaceMigration", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.AbortHierarchicalNamespaceMigrationSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "AbortHierarchicalNamespaceMigration", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// AbortHierarchicalNamespaceMigrationPreparer prepares the AbortHierarchicalNamespaceMigration request.
+func (client AccountsClient) AbortHierarchicalNamespaceMigrationPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/aborthnsonmigration", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// AbortHierarchicalNamespaceMigrationSender sends the AbortHierarchicalNamespaceMigration request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) AbortHierarchicalNamespaceMigrationSender(req *http.Request) (future AccountsAbortHierarchicalNamespaceMigrationFuture, err error) {
+ var resp *http.Response
+ future.FutureAPI = &azure.Future{}
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
+ if err != nil {
+ return
+ }
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
+ return
+}
+
+// AbortHierarchicalNamespaceMigrationResponder handles the response to the AbortHierarchicalNamespaceMigration request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) AbortHierarchicalNamespaceMigrationResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
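+// Example (editor sketch, not generator output; resource names are hypothetical):
+// blocking on the long-running operation returned above.
+//
+//	future, err := client.AbortHierarchicalNamespaceMigration(ctx, "my-rg", "myaccount")
+//	if err == nil {
+//		err = future.WaitForCompletionRef(ctx, client.Client)
+//	}
+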
+// CheckNameAvailability checks that the storage account name is valid and is not already in use.
+// Parameters:
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client AccountsClient) CheckNameAvailability(ctx context.Context, accountName AccountCheckNameAvailabilityParameters) (result CheckNameAvailabilityResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.CheckNameAvailability")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName.Name", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "accountName.Type", Name: validation.Null, Rule: true, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "CheckNameAvailability", err.Error())
+ }
+
+ req, err := client.CheckNameAvailabilityPreparer(ctx, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CheckNameAvailabilitySender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CheckNameAvailabilityResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// CheckNameAvailabilityPreparer prepares the CheckNameAvailability request.
+func (client AccountsClient) CheckNameAvailabilityPreparer(ctx context.Context, accountName AccountCheckNameAvailabilityParameters) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/checkNameAvailability", pathParameters),
+ autorest.WithJSON(accountName),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) CheckNameAvailabilityResponder(resp *http.Response) (result CheckNameAvailabilityResult, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
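+// Example (editor sketch, not generator output; the account name is hypothetical):
+// a typical availability check with this client.
+//
+//	client := storage.NewAccountsClient("<subscription id>")
+//	client.Authorizer = authorizer // any autorest.Authorizer; acquisition omitted
+//	result, err := client.CheckNameAvailability(context.Background(),
+//		storage.AccountCheckNameAvailabilityParameters{
+//			Name: to.StringPtr("mystorageacct"),
+//			Type: to.StringPtr("Microsoft.Storage/storageAccounts"),
+//		})
+//	// On success, result.NameAvailable reports whether the name can be used.
+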
+// Create asynchronously creates a new storage account with the specified parameters. If an account is already created
+// and a subsequent create request is issued with different properties, the account properties will be updated. If an
+// account is already created and a subsequent create or update request is issued with the exact same set of
+// properties, the request will succeed.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// parameters - the parameters to provide for the created account.
+func (client AccountsClient) Create(ctx context.Context, resourceGroupName string, accountName string, parameters AccountCreateParameters) (result AccountsCreateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Create")
+ defer func() {
+ sc := -1
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.Sku", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.Location", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.AccountPropertiesCreateParameters", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.SasPolicy", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.SasPolicy.SasExpirationPeriod", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.AccountPropertiesCreateParameters.SasPolicy.ExpirationAction", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ {Target: "parameters.AccountPropertiesCreateParameters.KeyPolicy", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.KeyPolicy.KeyExpirationPeriodInDays", Name: validation.Null, Rule: true, Chain: nil}}},
+ {Target: "parameters.AccountPropertiesCreateParameters.CustomDomain", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.CustomDomain.Name", Name: validation.Null, Rule: true, Chain: nil}}},
+ {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.DomainName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.NetBiosDomainName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.ForestName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.DomainGUID", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.DomainSid", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.AzureStorageSid", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }},
+ {Target: "parameters.AccountPropertiesCreateParameters.ImmutableStorageWithVersioning", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.ImmutableStorageWithVersioning.ImmutabilityPolicy", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.ImmutableStorageWithVersioning.ImmutabilityPolicy.ImmutabilityPeriodSinceCreationInDays", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.ImmutableStorageWithVersioning.ImmutabilityPolicy.ImmutabilityPeriodSinceCreationInDays", Name: validation.InclusiveMaximum, Rule: int64(146000), Chain: nil},
+ {Target: "parameters.AccountPropertiesCreateParameters.ImmutableStorageWithVersioning.ImmutabilityPolicy.ImmutabilityPeriodSinceCreationInDays", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil},
+ }},
+ }},
+ }},
+ }}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "Create", err.Error())
+ }
+
+ req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreatePreparer prepares the Create request.
+func (client AccountsClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, parameters AccountCreateParameters) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateSender sends the Create request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) CreateSender(req *http.Request) (future AccountsCreateFuture, err error) {
+ var resp *http.Response
+ future.FutureAPI = &azure.Future{}
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
+ if err != nil {
+ return
+ }
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
+ return
+}
+
+// CreateResponder handles the response to the Create request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) CreateResponder(resp *http.Response) (result Account, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
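+
+// exampleCreateAccount is an editor's sketch, not generated SDK code: it
+// shows the long-running Create pattern of starting the operation, blocking
+// until the future completes, then fetching the final Account. The angle-
+// bracket placeholders and the string-typed Sku/Kind values are assumptions.
+func exampleCreateAccount(ctx context.Context) (Account, error) {
+ client := NewAccountsClient("<subscriptionID>")
+ location := "eastus"
+ future, err := client.Create(ctx, "<resourceGroup>", "<accountName>", AccountCreateParameters{
+ Sku: &Sku{Name: SkuName("Standard_LRS")},
+ Kind: Kind("StorageV2"),
+ Location: &location,
+ })
+ if err != nil {
+ return Account{}, err
+ }
+ // WaitForCompletionRef polls until the asynchronous creation finishes.
+ if err = future.WaitForCompletionRef(ctx, client.Client); err != nil {
+ return Account{}, err
+ }
+ return future.Result(client)
+}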
+
+// Delete deletes a storage account in Microsoft Azure.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client AccountsClient) Delete(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client AccountsClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
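+
+// exampleDeleteAccount is an editor's sketch, not generated SDK code. Delete
+// is synchronous (200 or 204 on success), so no future is involved; the
+// placeholders are assumptions.
+func exampleDeleteAccount(ctx context.Context) error {
+ client := NewAccountsClient("<subscriptionID>")
+ _, err := client.Delete(ctx, "<resourceGroup>", "<accountName>")
+ return err
+}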
+
+// Failover triggers a failover request for a storage account in case of availability issues. For RA-GRS accounts,
+// the failover occurs from the storage account's primary cluster to the secondary cluster, which becomes the primary
+// after the failover completes.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client AccountsClient) Failover(ctx context.Context, resourceGroupName string, accountName string) (result AccountsFailoverFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Failover")
+ defer func() {
+ sc := -1
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "Failover", err.Error())
+ }
+
+ req, err := client.FailoverPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Failover", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.FailoverSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Failover", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// FailoverPreparer prepares the Failover request.
+func (client AccountsClient) FailoverPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/failover", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// FailoverSender sends the Failover request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) FailoverSender(req *http.Request) (future AccountsFailoverFuture, err error) {
+ var resp *http.Response
+ future.FutureAPI = &azure.Future{}
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
+ if err != nil {
+ return
+ }
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
+ return
+}
+
+// FailoverResponder handles the response to the Failover request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) FailoverResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
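+
+// exampleFailover is an editor's sketch, not generated SDK code: Failover
+// returns a future, and a caller would typically block on it because account
+// failover can take a long time. Placeholders are assumptions.
+func exampleFailover(ctx context.Context) error {
+ client := NewAccountsClient("<subscriptionID>")
+ future, err := client.Failover(ctx, "<resourceGroup>", "<accountName>")
+ if err != nil {
+ return err
+ }
+ return future.WaitForCompletionRef(ctx, client.Client)
+}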
+
+// GetProperties returns the properties for the specified storage account including but not limited to name, SKU name,
+// location, and account status. The ListKeys operation should be used to retrieve storage keys.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// expand - may be used to expand properties within the account's properties. By default, this data is not
+// included when fetching properties. Currently only geoReplicationStats and blobRestoreStatus are supported.
+func (client AccountsClient) GetProperties(ctx context.Context, resourceGroupName string, accountName string, expand AccountExpand) (result Account, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.GetProperties")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "GetProperties", err.Error())
+ }
+
+ req, err := client.GetPropertiesPreparer(ctx, resourceGroupName, accountName, expand)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetPropertiesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetPropertiesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// GetPropertiesPreparer prepares the GetProperties request.
+func (client AccountsClient) GetPropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string, expand AccountExpand) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(string(expand)) > 0 {
+ queryParameters["$expand"] = autorest.Encode("query", expand)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetPropertiesSender sends the GetProperties request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) GetPropertiesSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// GetPropertiesResponder handles the response to the GetProperties request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) GetPropertiesResponder(resp *http.Response) (result Account, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
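+
+// exampleGetProperties is an editor's sketch, not generated SDK code. The
+// string-typed expand value is an assumption (taken from the doc comment
+// above); pass AccountExpand("") to skip the optional $expand parameter.
+func exampleGetProperties(ctx context.Context) (Account, error) {
+ client := NewAccountsClient("<subscriptionID>")
+ return client.GetProperties(ctx, "<resourceGroup>", "<accountName>", AccountExpand("geoReplicationStats"))
+}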
+
+// HierarchicalNamespaceMigration performs a live migration of a storage account to enable a hierarchical namespace (HNS).
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// requestType - required. The hierarchical namespace migration type can be either a validation request
+// ('HnsOnValidationRequest') or a hydration request ('HnsOnHydrationRequest'). The validation request validates
+// the migration, whereas the hydration request migrates the account.
+func (client AccountsClient) HierarchicalNamespaceMigration(ctx context.Context, resourceGroupName string, accountName string, requestType string) (result AccountsHierarchicalNamespaceMigrationFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.HierarchicalNamespaceMigration")
+ defer func() {
+ sc := -1
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "HierarchicalNamespaceMigration", err.Error())
+ }
+
+ req, err := client.HierarchicalNamespaceMigrationPreparer(ctx, resourceGroupName, accountName, requestType)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "HierarchicalNamespaceMigration", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.HierarchicalNamespaceMigrationSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "HierarchicalNamespaceMigration", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// HierarchicalNamespaceMigrationPreparer prepares the HierarchicalNamespaceMigration request.
+func (client AccountsClient) HierarchicalNamespaceMigrationPreparer(ctx context.Context, resourceGroupName string, accountName string, requestType string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ "requestType": autorest.Encode("query", requestType),
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/hnsonmigration", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// HierarchicalNamespaceMigrationSender sends the HierarchicalNamespaceMigration request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) HierarchicalNamespaceMigrationSender(req *http.Request) (future AccountsHierarchicalNamespaceMigrationFuture, err error) {
+ var resp *http.Response
+ future.FutureAPI = &azure.Future{}
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
+ if err != nil {
+ return
+ }
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
+ return
+}
+
+// HierarchicalNamespaceMigrationResponder handles the response to the HierarchicalNamespaceMigration request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) HierarchicalNamespaceMigrationResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
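+
+// exampleHnsMigration is an editor's sketch, not generated SDK code: it runs
+// the validation request first; a second call with "HnsOnHydrationRequest"
+// would perform the actual migration. Placeholders are assumptions.
+func exampleHnsMigration(ctx context.Context) error {
+ client := NewAccountsClient("<subscriptionID>")
+ future, err := client.HierarchicalNamespaceMigration(ctx, "<resourceGroup>", "<accountName>", "HnsOnValidationRequest")
+ if err != nil {
+ return err
+ }
+ return future.WaitForCompletionRef(ctx, client.Client)
+}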
+
+// List lists all the storage accounts available under the subscription. Note that storage keys are not returned; use
+// the ListKeys operation for this.
+func (client AccountsClient) List(ctx context.Context) (result AccountListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.List")
+ defer func() {
+ sc := -1
+ if result.alr.Response.Response != nil {
+ sc = result.alr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.alr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.alr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", resp, "Failure responding to request")
+ return
+ }
+ if result.alr.hasNextLink() && result.alr.IsEmpty() {
+ err = result.NextWithContext(ctx)
+ return
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client AccountsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/storageAccounts", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) ListSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) ListResponder(resp *http.Response) (result AccountListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client AccountsClient) listNextResults(ctx context.Context, lastResults AccountListResult) (result AccountListResult, err error) {
+ req, err := lastResults.accountListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client AccountsClient) ListComplete(ctx context.Context) (result AccountListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx)
+ return
+}
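+
+// exampleListAll is an editor's sketch, not generated SDK code: it drains the
+// iterator returned by ListComplete, which crosses page boundaries on its
+// own. The placeholder subscription ID is an assumption.
+func exampleListAll(ctx context.Context) ([]Account, error) {
+ client := NewAccountsClient("<subscriptionID>")
+ it, err := client.ListComplete(ctx)
+ if err != nil {
+ return nil, err
+ }
+ var accounts []Account
+ for it.NotDone() {
+ accounts = append(accounts, it.Value())
+ if err = it.NextWithContext(ctx); err != nil {
+ return accounts, err
+ }
+ }
+ return accounts, nil
+}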
+
+// ListAccountSAS lists the SAS credentials of a storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// parameters - the parameters to provide to list SAS credentials for the storage account.
+func (client AccountsClient) ListAccountSAS(ctx context.Context, resourceGroupName string, accountName string, parameters AccountSasParameters) (result ListAccountSasResponse, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.ListAccountSAS")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.SharedAccessExpiryTime", Name: validation.Null, Rule: true, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "ListAccountSAS", err.Error())
+ }
+
+ req, err := client.ListAccountSASPreparer(ctx, resourceGroupName, accountName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListAccountSAS", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListAccountSASSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListAccountSAS", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListAccountSASResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListAccountSAS", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// ListAccountSASPreparer prepares the ListAccountSAS request.
+func (client AccountsClient) ListAccountSASPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters AccountSasParameters) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListAccountSas", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListAccountSASSender sends the ListAccountSAS request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) ListAccountSASSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListAccountSASResponder handles the response to the ListAccountSAS request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) ListAccountSASResponder(resp *http.Response) (result ListAccountSasResponse, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
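+
+// exampleAccountSAS is an editor's sketch, not generated SDK code. The
+// caller-supplied params must set SharedAccessExpiryTime (as validated in
+// ListAccountSAS above); the placeholders are assumptions.
+func exampleAccountSAS(ctx context.Context, params AccountSasParameters) (string, error) {
+ client := NewAccountsClient("<subscriptionID>")
+ res, err := client.ListAccountSAS(ctx, "<resourceGroup>", "<accountName>", params)
+ if err != nil || res.AccountSasToken == nil {
+ return "", err
+ }
+ return *res.AccountSasToken, nil
+}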
+
+// ListByResourceGroup lists all the storage accounts available under the given resource group. Note that storage keys
+// are not returned; use the ListKeys operation for this.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+func (client AccountsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result AccountListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.alr.Response.Response != nil {
+ sc = result.alr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "ListByResourceGroup", err.Error())
+ }
+
+ result.fn = client.listByResourceGroupNextResults
+ req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.alr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", resp, "Failure sending request")
+ return
+ }
+
+ result.alr, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", resp, "Failure responding to request")
+ return
+ }
+ if result.alr.hasNextLink() && result.alr.IsEmpty() {
+ err = result.NextWithContext(ctx)
+ return
+ }
+
+ return
+}
+
+// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
+func (client AccountsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) ListByResourceGroupResponder(resp *http.Response) (result AccountListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByResourceGroupNextResults retrieves the next set of results, if any.
+func (client AccountsClient) listByResourceGroupNextResults(ctx context.Context, lastResults AccountListResult) (result AccountListResult, err error) {
+ req, err := lastResults.accountListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
+func (client AccountsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result AccountListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
+ return
+}
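+
+// exampleListByGroup is an editor's sketch, not generated SDK code: unlike
+// the iterator used with ListComplete above, this walks the result page by
+// page. Placeholders are assumptions.
+func exampleListByGroup(ctx context.Context) ([]Account, error) {
+ client := NewAccountsClient("<subscriptionID>")
+ page, err := client.ListByResourceGroup(ctx, "<resourceGroup>")
+ if err != nil {
+ return nil, err
+ }
+ var accounts []Account
+ for page.NotDone() {
+ accounts = append(accounts, page.Values()...)
+ if err = page.NextWithContext(ctx); err != nil {
+ return accounts, err
+ }
+ }
+ return accounts, nil
+}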
+
+// ListKeys lists the access keys or Kerberos keys (if Active Directory is enabled) for the specified storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// expand - specifies the type of key to be listed. The only possible value is kerb.
+func (client AccountsClient) ListKeys(ctx context.Context, resourceGroupName string, accountName string, expand ListKeyExpand) (result AccountListKeysResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.ListKeys")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "ListKeys", err.Error())
+ }
+
+ req, err := client.ListKeysPreparer(ctx, resourceGroupName, accountName, expand)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListKeysSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListKeysResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// ListKeysPreparer prepares the ListKeys request.
+func (client AccountsClient) ListKeysPreparer(ctx context.Context, resourceGroupName string, accountName string, expand ListKeyExpand) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(string(expand)) > 0 {
+ queryParameters["$expand"] = autorest.Encode("query", expand)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/listKeys", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListKeysSender sends the ListKeys request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) ListKeysSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListKeysResponder handles the response to the ListKeys request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) ListKeysResponder(resp *http.Response) (result AccountListKeysResult, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
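+
+// exampleListKeys is an editor's sketch, not generated SDK code. The
+// string-typed expand value is an assumption; pass ListKeyExpand("") to list
+// only the access keys without Kerberos keys.
+func exampleListKeys(ctx context.Context) ([]AccountKey, error) {
+ client := NewAccountsClient("<subscriptionID>")
+ res, err := client.ListKeys(ctx, "<resourceGroup>", "<accountName>", ListKeyExpand("kerb"))
+ if err != nil || res.Keys == nil {
+ return nil, err
+ }
+ return *res.Keys, nil
+}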
+
+// ListServiceSAS lists the service SAS credentials of a specific resource.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// parameters - the parameters to provide to list service SAS credentials.
+func (client AccountsClient) ListServiceSAS(ctx context.Context, resourceGroupName string, accountName string, parameters ServiceSasParameters) (result ListServiceSasResponse, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.ListServiceSAS")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.CanonicalizedResource", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.Identifier", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.Identifier", Name: validation.MaxLength, Rule: 64, Chain: nil}}}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "ListServiceSAS", err.Error())
+ }
+
+ req, err := client.ListServiceSASPreparer(ctx, resourceGroupName, accountName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListServiceSAS", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListServiceSASSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListServiceSAS", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListServiceSASResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListServiceSAS", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// ListServiceSASPreparer prepares the ListServiceSAS request.
+func (client AccountsClient) ListServiceSASPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters ServiceSasParameters) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListServiceSas", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListServiceSASSender sends the ListServiceSAS request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) ListServiceSASSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListServiceSASResponder handles the response to the ListServiceSAS request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) ListServiceSASResponder(resp *http.Response) (result ListServiceSasResponse, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
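+
+// exampleServiceSAS is an editor's sketch, not generated SDK code. The
+// caller-supplied params must set CanonicalizedResource (as validated in
+// ListServiceSAS above); placeholders are assumptions.
+func exampleServiceSAS(ctx context.Context, params ServiceSasParameters) (string, error) {
+ client := NewAccountsClient("<subscriptionID>")
+ res, err := client.ListServiceSAS(ctx, "<resourceGroup>", "<accountName>", params)
+ if err != nil || res.ServiceSasToken == nil {
+ return "", err
+ }
+ return *res.ServiceSasToken, nil
+}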
+
+// RegenerateKey regenerates one of the access keys or Kerberos keys for the specified storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// regenerateKey - specifies the name of the key to regenerate: key1, key2, kerb1, or kerb2.
+func (client AccountsClient) RegenerateKey(ctx context.Context, resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (result AccountListKeysResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.RegenerateKey")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: regenerateKey,
+ Constraints: []validation.Constraint{{Target: "regenerateKey.KeyName", Name: validation.Null, Rule: true, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "RegenerateKey", err.Error())
+ }
+
+ req, err := client.RegenerateKeyPreparer(ctx, resourceGroupName, accountName, regenerateKey)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.RegenerateKeySender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.RegenerateKeyResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// RegenerateKeyPreparer prepares the RegenerateKey request.
+func (client AccountsClient) RegenerateKeyPreparer(ctx context.Context, resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/regenerateKey", pathParameters),
+ autorest.WithJSON(regenerateKey),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// RegenerateKeySender sends the RegenerateKey request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) RegenerateKeySender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// RegenerateKeyResponder handles the response to the RegenerateKey request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) RegenerateKeyResponder(resp *http.Response) (result AccountListKeysResult, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
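+
+// exampleRegenerateKey is an editor's sketch, not generated SDK code: it
+// rotates key1 and returns the refreshed key list. Placeholders are
+// assumptions.
+func exampleRegenerateKey(ctx context.Context) (AccountListKeysResult, error) {
+ client := NewAccountsClient("<subscriptionID>")
+ keyName := "key1" // key2, kerb1, or kerb2 are the other valid names
+ return client.RegenerateKey(ctx, "<resourceGroup>", "<accountName>",
+ AccountRegenerateKeyParameters{KeyName: &keyName})
+}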
+
+// RestoreBlobRanges restores blobs in the specified blob ranges.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// parameters - the parameters to provide for restore blob ranges.
+func (client AccountsClient) RestoreBlobRanges(ctx context.Context, resourceGroupName string, accountName string, parameters BlobRestoreParameters) (result AccountsRestoreBlobRangesFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.RestoreBlobRanges")
+ defer func() {
+ sc := -1
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.TimeToRestore", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.BlobRanges", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "RestoreBlobRanges", err.Error())
+ }
+
+ req, err := client.RestoreBlobRangesPreparer(ctx, resourceGroupName, accountName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RestoreBlobRanges", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.RestoreBlobRangesSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RestoreBlobRanges", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// RestoreBlobRangesPreparer prepares the RestoreBlobRanges request.
+func (client AccountsClient) RestoreBlobRangesPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters BlobRestoreParameters) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/restoreBlobRanges", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// RestoreBlobRangesSender sends the RestoreBlobRanges request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) RestoreBlobRangesSender(req *http.Request) (future AccountsRestoreBlobRangesFuture, err error) {
+ var resp *http.Response
+ future.FutureAPI = &azure.Future{}
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
+ if err != nil {
+ return
+ }
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+ future.Result = future.result
+ return
+}
+
+// RestoreBlobRangesResponder handles the response to the RestoreBlobRanges request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) RestoreBlobRangesResponder(resp *http.Response) (result BlobRestoreStatus, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// RevokeUserDelegationKeys revokes user delegation keys.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client AccountsClient) RevokeUserDelegationKeys(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.RevokeUserDelegationKeys")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "RevokeUserDelegationKeys", err.Error())
+ }
+
+ req, err := client.RevokeUserDelegationKeysPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RevokeUserDelegationKeys", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.RevokeUserDelegationKeysSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RevokeUserDelegationKeys", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.RevokeUserDelegationKeysResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RevokeUserDelegationKeys", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
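+// A minimal usage sketch (resource names are placeholders); revocation
+// invalidates SAS tokens signed with the account's existing delegation keys:
+//
+//   resp, err := client.RevokeUserDelegationKeys(ctx, "example-rg", "examplestorage")
+//   _ = resp // a 200 response carries no body for this operation
+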
+// RevokeUserDelegationKeysPreparer prepares the RevokeUserDelegationKeys request.
+func (client AccountsClient) RevokeUserDelegationKeysPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/revokeUserDelegationKeys", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// RevokeUserDelegationKeysSender sends the RevokeUserDelegationKeys request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) RevokeUserDelegationKeysSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// RevokeUserDelegationKeysResponder handles the response to the RevokeUserDelegationKeys request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) RevokeUserDelegationKeysResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Update updates the SKU, encryption, access tier, or tags for a storage account. It can also be used to map the
+// account to a custom domain. Only one custom domain is supported per storage account; the replacement/change of
+// custom domain is not supported. To replace an old custom domain, the old value must be cleared/unregistered before
+// a new value can be set. The update of multiple properties is supported. This call does not change the storage keys
+// for the account; to change the storage account keys, use the regenerate keys operation. The location and name of
+// the storage account cannot be changed after creation.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// parameters - the parameters to provide for the updated account.
+func (client AccountsClient) Update(ctx context.Context, resourceGroupName string, accountName string, parameters AccountUpdateParameters) (result Account, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "Update", err.Error())
+ }
+
+ req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
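+// Sketch of a tags-only update (the tag key/value and the use of go-autorest's
+// to.StringPtr helper are illustrative assumptions):
+//
+//   params := storage.AccountUpdateParameters{
+//       Tags: map[string]*string{"environment": to.StringPtr("dev")},
+//   }
+//   acct, err := client.Update(ctx, "example-rg", "examplestorage", params)
+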
+// UpdatePreparer prepares the Update request.
+func (client AccountsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, parameters AccountUpdateParameters) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) UpdateSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) UpdateResponder(resp *http.Response) (result Account, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/blobcontainers.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/blobcontainers.go
new file mode 100644
index 000000000000..ab6e1f974fc3
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/blobcontainers.go
@@ -0,0 +1,1539 @@
+package storage
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "net/http"
+
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+)
+
+// BlobContainersClient is the Azure Storage Management API.
+type BlobContainersClient struct {
+ BaseClient
+}
+
+// NewBlobContainersClient creates an instance of the BlobContainersClient client.
+func NewBlobContainersClient(subscriptionID string) BlobContainersClient {
+ return NewBlobContainersClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewBlobContainersClientWithBaseURI creates an instance of the BlobContainersClient client using a custom endpoint.
+// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
+func NewBlobContainersClientWithBaseURI(baseURI string, subscriptionID string) BlobContainersClient {
+ return BlobContainersClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
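+// Construction sketch; the sovereign-cloud endpoint below is one example of a
+// non-standard base URI:
+//
+//   client := storage.NewBlobContainersClient("<subscription-id>")
+//   // or, for example, against Azure China:
+//   cn := storage.NewBlobContainersClientWithBaseURI(azure.ChinaCloud.ResourceManagerEndpoint, "<subscription-id>")
+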
+// ClearLegalHold clears legal hold tags; only the tags specified in the request are cleared. Clearing the same or a
+// non-existent tag results in an idempotent operation.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+// legalHold - the LegalHold property that will be clear from a blob container.
+func (client BlobContainersClient) ClearLegalHold(ctx context.Context, resourceGroupName string, accountName string, containerName string, legalHold LegalHold) (result LegalHold, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.ClearLegalHold")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: containerName,
+ Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: legalHold,
+ Constraints: []validation.Constraint{{Target: "legalHold.Tags", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "ClearLegalHold", err.Error())
+ }
+
+ req, err := client.ClearLegalHoldPreparer(ctx, resourceGroupName, accountName, containerName, legalHold)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ClearLegalHold", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ClearLegalHoldSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ClearLegalHold", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ClearLegalHoldResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ClearLegalHold", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
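+// Usage sketch (the tag value is a placeholder); only the tags sent in the
+// request body are cleared:
+//
+//   hold := storage.LegalHold{Tags: to.StringSlicePtr([]string{"audit-2022"})}
+//   cleared, err := client.ClearLegalHold(ctx, "example-rg", "examplestorage", "example-container", hold)
+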
+// ClearLegalHoldPreparer prepares the ClearLegalHold request.
+func (client BlobContainersClient) ClearLegalHoldPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, legalHold LegalHold) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "containerName": autorest.Encode("path", containerName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ legalHold.HasLegalHold = nil
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/clearLegalHold", pathParameters),
+ autorest.WithJSON(legalHold),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ClearLegalHoldSender sends the ClearLegalHold request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) ClearLegalHoldSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ClearLegalHoldResponder handles the response to the ClearLegalHold request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) ClearLegalHoldResponder(resp *http.Response) (result LegalHold, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Create creates a new container under the specified account, as described by the request body. The container
+// resource includes metadata and properties for that container. It does not include a list of the blobs contained by
+// the container.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+// blobContainer - properties of the blob container to create.
+func (client BlobContainersClient) Create(ctx context.Context, resourceGroupName string, accountName string, containerName string, blobContainer BlobContainer) (result BlobContainer, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.Create")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: containerName,
+ Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "Create", err.Error())
+ }
+
+ req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, containerName, blobContainer)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Create", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Create", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Create", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
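+// Minimal creation sketch (names are placeholders); an empty BlobContainer
+// requests a container with service defaults, i.e. private access:
+//
+//   ctr, err := client.Create(ctx, "example-rg", "examplestorage", "example-container", storage.BlobContainer{})
+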
+// CreatePreparer prepares the Create request.
+func (client BlobContainersClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, blobContainer BlobContainer) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "containerName": autorest.Encode("path", containerName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}", pathParameters),
+ autorest.WithJSON(blobContainer),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateSender sends the Create request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) CreateSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// CreateResponder handles the response to the Create request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) CreateResponder(resp *http.Response) (result BlobContainer, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// CreateOrUpdateImmutabilityPolicy creates or updates an unlocked immutability policy. ETag in If-Match is honored if
+// given but not required for this operation.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+// parameters - the ImmutabilityPolicy Properties that will be created or updated to a blob container.
+// ifMatch - the entity state (ETag) version of the immutability policy to update. A value of "*" can be used
+// to apply the operation only if the immutability policy already exists. If omitted, this operation will
+// always be applied.
+func (client BlobContainersClient) CreateOrUpdateImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, parameters *ImmutabilityPolicy, ifMatch string) (result ImmutabilityPolicy, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.CreateOrUpdateImmutabilityPolicy")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: containerName,
+ Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ImmutabilityPolicyProperty", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "CreateOrUpdateImmutabilityPolicy", err.Error())
+ }
+
+ req, err := client.CreateOrUpdateImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, parameters, ifMatch)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "CreateOrUpdateImmutabilityPolicy", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateOrUpdateImmutabilityPolicySender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "CreateOrUpdateImmutabilityPolicy", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateImmutabilityPolicyResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "CreateOrUpdateImmutabilityPolicy", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
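+// Sketch of creating an unlocked policy (the 7-day retention is an arbitrary
+// example value):
+//
+//   policy := storage.ImmutabilityPolicy{
+//       ImmutabilityPolicyProperty: &storage.ImmutabilityPolicyProperty{
+//           ImmutabilityPeriodSinceCreationInDays: to.Int32Ptr(7),
+//       },
+//   }
+//   created, err := client.CreateOrUpdateImmutabilityPolicy(ctx, "example-rg", "examplestorage", "example-container", &policy, "")
+//   // An empty ifMatch omits the If-Match header, making the write unconditional.
+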
+// CreateOrUpdateImmutabilityPolicyPreparer prepares the CreateOrUpdateImmutabilityPolicy request.
+func (client BlobContainersClient) CreateOrUpdateImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, parameters *ImmutabilityPolicy, ifMatch string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "containerName": autorest.Encode("path", containerName),
+ "immutabilityPolicyName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if parameters != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithJSON(parameters))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateImmutabilityPolicySender sends the CreateOrUpdateImmutabilityPolicy request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) CreateOrUpdateImmutabilityPolicySender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// CreateOrUpdateImmutabilityPolicyResponder handles the response to the CreateOrUpdateImmutabilityPolicy request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) CreateOrUpdateImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes the specified container under its account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+func (client BlobContainersClient) Delete(ctx context.Context, resourceGroupName string, accountName string, containerName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: containerName,
+ Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, containerName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Delete", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
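+// Deletion sketch (names are placeholders):
+//
+//   resp, err := client.Delete(ctx, "example-rg", "examplestorage", "example-container")
+//   // Per the responder below, both 200 and 204 are treated as success.
+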
+// DeletePreparer prepares the Delete request.
+func (client BlobContainersClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "containerName": autorest.Encode("path", containerName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// DeleteImmutabilityPolicy aborts an unlocked immutability policy. The response of delete has
+// immutabilityPeriodSinceCreationInDays set to 0. ETag in If-Match is required for this operation. Deleting a locked
+// immutability policy is not allowed; the only way is to delete the container after deleting all expired blobs
+// inside the policy-locked container.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+// ifMatch - the entity state (ETag) version of the immutability policy to update. A value of "*" can be used
+// to apply the operation only if the immutability policy already exists. If omitted, this operation will
+// always be applied.
+func (client BlobContainersClient) DeleteImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (result ImmutabilityPolicy, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.DeleteImmutabilityPolicy")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: containerName,
+ Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "DeleteImmutabilityPolicy", err.Error())
+ }
+
+ req, err := client.DeleteImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, ifMatch)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "DeleteImmutabilityPolicy", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteImmutabilityPolicySender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "DeleteImmutabilityPolicy", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteImmutabilityPolicyResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "DeleteImmutabilityPolicy", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
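+// Abort sketch; because If-Match is required here, the ETag is read back
+// first via GetImmutabilityPolicy (names are placeholders):
+//
+//   pol, err := client.GetImmutabilityPolicy(ctx, "example-rg", "examplestorage", "example-container", "")
+//   if err == nil {
+//       _, err = client.DeleteImmutabilityPolicy(ctx, "example-rg", "examplestorage", "example-container", *pol.Etag)
+//   }
+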
+// DeleteImmutabilityPolicyPreparer prepares the DeleteImmutabilityPolicy request.
+func (client BlobContainersClient) DeleteImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "containerName": autorest.Encode("path", containerName),
+ "immutabilityPolicyName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters),
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteImmutabilityPolicySender sends the DeleteImmutabilityPolicy request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) DeleteImmutabilityPolicySender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// DeleteImmutabilityPolicyResponder handles the response to the DeleteImmutabilityPolicy request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) DeleteImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ExtendImmutabilityPolicy extends the immutabilityPeriodSinceCreationInDays of a locked immutabilityPolicy. This is
+// the only action allowed on a locked policy. ETag in If-Match is required for this operation.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+// ifMatch - the entity state (ETag) version of the immutability policy to update. A value of "*" can be used
+// to apply the operation only if the immutability policy already exists. If omitted, this operation will
+// always be applied.
+// parameters - the ImmutabilityPolicy Properties that will be extended for a blob container.
+func (client BlobContainersClient) ExtendImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string, parameters *ImmutabilityPolicy) (result ImmutabilityPolicy, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.ExtendImmutabilityPolicy")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: containerName,
+ Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ImmutabilityPolicyProperty", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "ExtendImmutabilityPolicy", err.Error())
+ }
+
+ req, err := client.ExtendImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, ifMatch, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ExtendImmutabilityPolicy", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ExtendImmutabilityPolicySender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ExtendImmutabilityPolicy", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ExtendImmutabilityPolicyResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ExtendImmutabilityPolicy", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
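+// Extension sketch for a locked policy (the 30-day value and the etag variable
+// are placeholders; the new period is expected to exceed the current one):
+//
+//   longer := storage.ImmutabilityPolicy{
+//       ImmutabilityPolicyProperty: &storage.ImmutabilityPolicyProperty{
+//           ImmutabilityPeriodSinceCreationInDays: to.Int32Ptr(30),
+//       },
+//   }
+//   extended, err := client.ExtendImmutabilityPolicy(ctx, "example-rg", "examplestorage", "example-container", etag, &longer)
+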
+// ExtendImmutabilityPolicyPreparer prepares the ExtendImmutabilityPolicy request.
+func (client BlobContainersClient) ExtendImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string, parameters *ImmutabilityPolicy) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "containerName": autorest.Encode("path", containerName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/default/extend", pathParameters),
+ autorest.WithQueryParameters(queryParameters),
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ if parameters != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithJSON(parameters))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ExtendImmutabilityPolicySender sends the ExtendImmutabilityPolicy request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) ExtendImmutabilityPolicySender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ExtendImmutabilityPolicyResponder handles the response to the ExtendImmutabilityPolicy request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) ExtendImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Get gets properties of a specified container.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+func (client BlobContainersClient) Get(ctx context.Context, resourceGroupName string, accountName string, containerName string) (result BlobContainer, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: containerName,
+ Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, accountName, containerName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Get", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
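+// Read sketch (names are placeholders):
+//
+//   ctr, err := client.Get(ctx, "example-rg", "examplestorage", "example-container")
+//   // ctr.ContainerProperties carries the container's metadata and state.
+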
+// GetPreparer prepares the Get request.
+func (client BlobContainersClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "containerName": autorest.Encode("path", containerName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) GetSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) GetResponder(resp *http.Response) (result BlobContainer, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetImmutabilityPolicy gets the existing immutability policy along with the corresponding ETag in response headers
+// and body.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+// ifMatch - the entity state (ETag) version of the immutability policy to update. A value of "*" can be used
+// to apply the operation only if the immutability policy already exists. If omitted, this operation will
+// always be applied.
+func (client BlobContainersClient) GetImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (result ImmutabilityPolicy, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.GetImmutabilityPolicy")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: containerName,
+ Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "GetImmutabilityPolicy", err.Error())
+ }
+
+ req, err := client.GetImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, ifMatch)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "GetImmutabilityPolicy", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetImmutabilityPolicySender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "GetImmutabilityPolicy", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetImmutabilityPolicyResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "GetImmutabilityPolicy", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
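+// Read sketch; an empty ifMatch skips the conditional If-Match header:
+//
+//   pol, err := client.GetImmutabilityPolicy(ctx, "example-rg", "examplestorage", "example-container", "")
+//   // pol.Etag is the value required by the delete and extend operations.
+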
+// GetImmutabilityPolicyPreparer prepares the GetImmutabilityPolicy request.
+func (client BlobContainersClient) GetImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "containerName": autorest.Encode("path", containerName),
+ "immutabilityPolicyName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetImmutabilityPolicySender sends the GetImmutabilityPolicy request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) GetImmutabilityPolicySender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// GetImmutabilityPolicyResponder handles the response to the GetImmutabilityPolicy request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) GetImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Lease establishes and manages a lock on a container for delete operations (the Lease Container operation). The
+// lock duration can be 15 to 60 seconds, or can be infinite.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+// parameters - lease Container request body.
+func (client BlobContainersClient) Lease(ctx context.Context, resourceGroupName string, accountName string, containerName string, parameters *LeaseContainerRequest) (result LeaseContainerResponse, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.Lease")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: containerName,
+ Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "Lease", err.Error())
+ }
+
+ req, err := client.LeasePreparer(ctx, resourceGroupName, accountName, containerName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Lease", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.LeaseSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Lease", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.LeaseResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Lease", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// LeasePreparer prepares the Lease request.
+func (client BlobContainersClient) LeasePreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, parameters *LeaseContainerRequest) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "containerName": autorest.Encode("path", containerName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/lease", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if parameters != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithJSON(parameters))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// LeaseSender sends the Lease request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) LeaseSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// LeaseResponder handles the response to the Lease request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) LeaseResponder(resp *http.Response) (result LeaseContainerResponse, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List lists all containers and does not support a prefix like the data plane. Also, SRP today does not return a
+// continuation token.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// maxpagesize - optional. Specifies the maximum number of containers that can be included in the list.
+// filter - optional. When specified, only container names starting with the filter will be listed.
+// include - optional, used to include the properties for soft deleted blob containers.
+func (client BlobContainersClient) List(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string, include ListContainersInclude) (result ListContainerItemsPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.List")
+ defer func() {
+ sc := -1
+ if result.lci.Response.Response != nil {
+ sc = result.lci.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, resourceGroupName, accountName, maxpagesize, filter, include)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.lci.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.lci, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "List", resp, "Failure responding to request")
+ return
+ }
+ if result.lci.hasNextLink() && result.lci.IsEmpty() {
+ err = result.NextWithContext(ctx)
+ return
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client BlobContainersClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string, include ListContainersInclude) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(maxpagesize) > 0 {
+ queryParameters["$maxpagesize"] = autorest.Encode("query", maxpagesize)
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+ if len(string(include)) > 0 {
+ queryParameters["$include"] = autorest.Encode("query", include)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) ListSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) ListResponder(resp *http.Response) (result ListContainerItems, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client BlobContainersClient) listNextResults(ctx context.Context, lastResults ListContainerItems) (result ListContainerItems, err error) {
+ req, err := lastResults.listContainerItemsPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "storage.BlobContainersClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "storage.BlobContainersClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client BlobContainersClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string, include ListContainersInclude) (result ListContainerItemsIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, resourceGroupName, accountName, maxpagesize, filter, include)
+ return
+}
+
+// LockImmutabilityPolicy sets the ImmutabilityPolicy to Locked state. The only action allowed on a Locked policy is
+// ExtendImmutabilityPolicy action. ETag in If-Match is required for this operation.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+// ifMatch - the entity state (ETag) version of the immutability policy to update. This operation requires an
+// ETag; a value of "*" can be used to apply the operation only if the immutability policy already exists (the
+// preparer always sends the If-Match header).
+func (client BlobContainersClient) LockImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (result ImmutabilityPolicy, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.LockImmutabilityPolicy")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: containerName,
+ Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "LockImmutabilityPolicy", err.Error())
+ }
+
+ req, err := client.LockImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, ifMatch)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "LockImmutabilityPolicy", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.LockImmutabilityPolicySender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "LockImmutabilityPolicy", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.LockImmutabilityPolicyResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "LockImmutabilityPolicy", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// LockImmutabilityPolicyPreparer prepares the LockImmutabilityPolicy request.
+func (client BlobContainersClient) LockImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "containerName": autorest.Encode("path", containerName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/default/lock", pathParameters),
+ autorest.WithQueryParameters(queryParameters),
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// LockImmutabilityPolicySender sends the LockImmutabilityPolicy request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) LockImmutabilityPolicySender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// LockImmutabilityPolicyResponder handles the response to the LockImmutabilityPolicy request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) LockImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ObjectLevelWorm this operation migrates a blob container from container-level WORM to an object-level
+// immutability enabled container. Prerequisites: the container must have a container-level immutability policy in
+// either a locked or unlocked state, account-level versioning must be enabled, and there must be no legal hold on
+// the container.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+func (client BlobContainersClient) ObjectLevelWorm(ctx context.Context, resourceGroupName string, accountName string, containerName string) (result BlobContainersObjectLevelWormFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.ObjectLevelWorm")
+ defer func() {
+ sc := -1
+ if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+ sc = result.FutureAPI.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: containerName,
+ Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "ObjectLevelWorm", err.Error())
+ }
+
+ req, err := client.ObjectLevelWormPreparer(ctx, resourceGroupName, accountName, containerName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ObjectLevelWorm", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.ObjectLevelWormSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ObjectLevelWorm", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// ObjectLevelWormPreparer prepares the ObjectLevelWorm request.
+func (client BlobContainersClient) ObjectLevelWormPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "containerName": autorest.Encode("path", containerName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/migrate", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ObjectLevelWormSender sends the ObjectLevelWorm request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) ObjectLevelWormSender(req *http.Request) (future BlobContainersObjectLevelWormFuture, err error) {
+ var resp *http.Response
+ future.FutureAPI = &azure.Future{}
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
+ if err != nil {
+ return
+ }
+ var azf azure.Future
+ azf, err = azure.NewFutureFromResponse(resp)
+ future.FutureAPI = &azf
+	future.Result = future.result // expose the unexported result method as the future's Result callback
+ return
+}
+
+// ObjectLevelWormResponder handles the response to the ObjectLevelWorm request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) ObjectLevelWormResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
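+
+// Example (reviewer sketch, not generated code): callers typically block on
+// the returned future before reading the terminal response:
+//
+//	err = future.WaitForCompletionRef(ctx, client.Client)
+//	resp, err := future.Result(client)
+//
+// Result is wired to the unexported result method in ObjectLevelWormSender
+// and invokes ObjectLevelWormResponder once the long-running operation ends.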
+
+// SetLegalHold sets legal hold tags. Setting the same tag results in an idempotent operation. SetLegalHold follows an
+// append pattern and does not clear out the existing tags that are not specified in the request.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+// legalHold - the LegalHold property that will be set to a blob container.
+func (client BlobContainersClient) SetLegalHold(ctx context.Context, resourceGroupName string, accountName string, containerName string, legalHold LegalHold) (result LegalHold, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.SetLegalHold")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: containerName,
+ Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: legalHold,
+ Constraints: []validation.Constraint{{Target: "legalHold.Tags", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "SetLegalHold", err.Error())
+ }
+
+ req, err := client.SetLegalHoldPreparer(ctx, resourceGroupName, accountName, containerName, legalHold)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "SetLegalHold", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.SetLegalHoldSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "SetLegalHold", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.SetLegalHoldResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "SetLegalHold", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// SetLegalHoldPreparer prepares the SetLegalHold request.
+func (client BlobContainersClient) SetLegalHoldPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, legalHold LegalHold) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "containerName": autorest.Encode("path", containerName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+	legalHold.HasLegalHold = nil // HasLegalHold is read-only, so it is cleared before the body is marshalled
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/setLegalHold", pathParameters),
+ autorest.WithJSON(legalHold),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// SetLegalHoldSender sends the SetLegalHold request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) SetLegalHoldSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// SetLegalHoldResponder handles the response to the SetLegalHold request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) SetLegalHoldResponder(resp *http.Response) (result LegalHold, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Update updates container properties as specified in request body. Properties not mentioned in the request will be
+// unchanged. Update fails if the specified container doesn't already exist.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+// blobContainer - properties to update for the blob container.
+func (client BlobContainersClient) Update(ctx context.Context, resourceGroupName string, accountName string, containerName string, blobContainer BlobContainer) (result BlobContainer, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: containerName,
+ Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "Update", err.Error())
+ }
+
+ req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, containerName, blobContainer)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Update", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Update", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client BlobContainersClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, blobContainer BlobContainer) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "containerName": autorest.Encode("path", containerName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}", pathParameters),
+ autorest.WithJSON(blobContainer),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) UpdateSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) UpdateResponder(resp *http.Response) (result BlobContainer, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
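
The generated BlobContainersClient above follows the usual AutoRest chain (operation method -> Preparer -> Sender -> Responder). As a reviewer aid, here is a minimal usage sketch in Go; the subscription, resource group, and account names are placeholders, credentials come from the environment, and none of this is part of the vendored code:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
    	"github.com/Azure/go-autorest/autorest/azure/auth"
    )

    func main() {
    	// Placeholder IDs; a real caller supplies its own.
    	client := storage.NewBlobContainersClient("<subscription-id>")

    	// Assumes AZURE_TENANT_ID, AZURE_CLIENT_ID, etc. are set.
    	authorizer, err := auth.NewAuthorizerFromEnvironment()
    	if err != nil {
    		panic(err)
    	}
    	client.Authorizer = authorizer

    	// ListComplete drives List and listNextResults internally, so the
    	// caller never handles continuation tokens.
    	ctx := context.Background()
    	it, err := client.ListComplete(ctx, "<resource-group>", "<account>", "", "", "")
    	if err != nil {
    		panic(err)
    	}
    	for it.NotDone() {
    		if name := it.Value().Name; name != nil {
    			fmt.Println(*name)
    		}
    		if err := it.NextWithContext(ctx); err != nil {
    			panic(err)
    		}
    	}
    }
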
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/blobinventorypolicies.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/blobinventorypolicies.go
new file mode 100644
index 000000000000..112f6c1a95c5
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/blobinventorypolicies.go
@@ -0,0 +1,410 @@
+package storage
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// BlobInventoryPoliciesClient is the Azure Storage Management API.
+type BlobInventoryPoliciesClient struct {
+ BaseClient
+}
+
+// NewBlobInventoryPoliciesClient creates an instance of the BlobInventoryPoliciesClient client.
+func NewBlobInventoryPoliciesClient(subscriptionID string) BlobInventoryPoliciesClient {
+ return NewBlobInventoryPoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewBlobInventoryPoliciesClientWithBaseURI creates an instance of the BlobInventoryPoliciesClient client using a
+// custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds,
+// Azure stack).
+func NewBlobInventoryPoliciesClientWithBaseURI(baseURI string, subscriptionID string) BlobInventoryPoliciesClient {
+ return BlobInventoryPoliciesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate sets the blob inventory policy on the specified storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// properties - the blob inventory policy set to a storage account.
+func (client BlobInventoryPoliciesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, properties BlobInventoryPolicy) (result BlobInventoryPolicy, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobInventoryPoliciesClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: properties,
+ Constraints: []validation.Constraint{{Target: "properties.BlobInventoryPolicyProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "properties.BlobInventoryPolicyProperties.Policy", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "properties.BlobInventoryPolicyProperties.Policy.Enabled", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "properties.BlobInventoryPolicyProperties.Policy.Type", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "properties.BlobInventoryPolicyProperties.Policy.Rules", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }}}}}); err != nil {
+ return result, validation.NewError("storage.BlobInventoryPoliciesClient", "CreateOrUpdate", err.Error())
+ }
+
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, properties)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateOrUpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "CreateOrUpdate", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "CreateOrUpdate", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client BlobInventoryPoliciesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, properties BlobInventoryPolicy) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "blobInventoryPolicyName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies/{blobInventoryPolicyName}", pathParameters),
+ autorest.WithJSON(properties),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobInventoryPoliciesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client BlobInventoryPoliciesClient) CreateOrUpdateResponder(resp *http.Response) (result BlobInventoryPolicy, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes the blob inventory policy associated with the specified storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client BlobInventoryPoliciesClient) Delete(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobInventoryPoliciesClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobInventoryPoliciesClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "Delete", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client BlobInventoryPoliciesClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "blobInventoryPolicyName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies/{blobInventoryPolicyName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobInventoryPoliciesClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client BlobInventoryPoliciesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets the blob inventory policy associated with the specified storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client BlobInventoryPoliciesClient) Get(ctx context.Context, resourceGroupName string, accountName string) (result BlobInventoryPolicy, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobInventoryPoliciesClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobInventoryPoliciesClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "Get", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client BlobInventoryPoliciesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "blobInventoryPolicyName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies/{blobInventoryPolicyName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobInventoryPoliciesClient) GetSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client BlobInventoryPoliciesClient) GetResponder(resp *http.Response) (result BlobInventoryPolicy, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List gets the blob inventory policy associated with the specified storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client BlobInventoryPoliciesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result ListBlobInventoryPolicy, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobInventoryPoliciesClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobInventoryPoliciesClient", "List", err.Error())
+ }
+
+ req, err := client.ListPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "List", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client BlobInventoryPoliciesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobInventoryPoliciesClient) ListSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client BlobInventoryPoliciesClient) ListResponder(resp *http.Response) (result ListBlobInventoryPolicy, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
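
Like the container client, the BlobInventoryPoliciesClient above pins the policy name to "default" in every preparer, so callers address the account's single inventory policy without naming it. A short sketch under the same placeholder-name assumptions as the container example (reusing the authorizer, ctx, and imports set up there):

    client := storage.NewBlobInventoryPoliciesClient("<subscription-id>")
    client.Authorizer = authorizer

    // Get and Delete both target the fixed "default" policy.
    policy, err := client.Get(ctx, "<resource-group>", "<account>")
    if err != nil {
    	panic(err)
    }
    if p := policy.BlobInventoryPolicyProperties; p != nil && p.Policy != nil && p.Policy.Enabled != nil {
    	fmt.Println("inventory policy enabled:", *p.Policy.Enabled)
    }
    if _, err := client.Delete(ctx, "<resource-group>", "<account>"); err != nil {
    	panic(err)
    }
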
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/blobservices.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/blobservices.go
new file mode 100644
index 000000000000..c25c66069545
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/blobservices.go
@@ -0,0 +1,344 @@
+package storage
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// BlobServicesClient is the Azure Storage Management API.
+type BlobServicesClient struct {
+ BaseClient
+}
+
+// NewBlobServicesClient creates an instance of the BlobServicesClient client.
+func NewBlobServicesClient(subscriptionID string) BlobServicesClient {
+ return NewBlobServicesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewBlobServicesClientWithBaseURI creates an instance of the BlobServicesClient client using a custom endpoint. Use
+// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
+func NewBlobServicesClientWithBaseURI(baseURI string, subscriptionID string) BlobServicesClient {
+ return BlobServicesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// GetServiceProperties gets the properties of a storage account’s Blob service, including properties for Storage
+// Analytics and CORS (Cross-Origin Resource Sharing) rules.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client BlobServicesClient) GetServiceProperties(ctx context.Context, resourceGroupName string, accountName string) (result BlobServiceProperties, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobServicesClient.GetServiceProperties")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobServicesClient", "GetServiceProperties", err.Error())
+ }
+
+ req, err := client.GetServicePropertiesPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "GetServiceProperties", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetServicePropertiesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "GetServiceProperties", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetServicePropertiesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "GetServiceProperties", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// GetServicePropertiesPreparer prepares the GetServiceProperties request.
+func (client BlobServicesClient) GetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "BlobServicesName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/{BlobServicesName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetServicePropertiesSender sends the GetServiceProperties request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobServicesClient) GetServicePropertiesSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// GetServicePropertiesResponder handles the response to the GetServiceProperties request. The method always
+// closes the http.Response Body.
+func (client BlobServicesClient) GetServicePropertiesResponder(resp *http.Response) (result BlobServiceProperties, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List lists the blob services of the storage account. It returns a collection of one object named default.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client BlobServicesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result BlobServiceItems, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobServicesClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobServicesClient", "List", err.Error())
+ }
+
+ req, err := client.ListPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "List", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client BlobServicesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobServicesClient) ListSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client BlobServicesClient) ListResponder(resp *http.Response) (result BlobServiceItems, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// SetServiceProperties sets the properties of a storage account’s Blob service, including properties for Storage
+// Analytics and CORS (Cross-Origin Resource Sharing) rules.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// parameters - the properties of a storage account’s Blob service, including properties for Storage Analytics
+// and CORS (Cross-Origin Resource Sharing) rules.
+func (client BlobServicesClient) SetServiceProperties(ctx context.Context, resourceGroupName string, accountName string, parameters BlobServiceProperties) (result BlobServiceProperties, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobServicesClient.SetServiceProperties")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.DeleteRetentionPolicy", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.DeleteRetentionPolicy.Days", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.DeleteRetentionPolicy.Days", Name: validation.InclusiveMaximum, Rule: int64(365), Chain: nil},
+ {Target: "parameters.BlobServicePropertiesProperties.DeleteRetentionPolicy.Days", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil},
+ }},
+ }},
+ {Target: "parameters.BlobServicePropertiesProperties.ChangeFeed", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.ChangeFeed.RetentionInDays", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.ChangeFeed.RetentionInDays", Name: validation.InclusiveMaximum, Rule: int64(146000), Chain: nil},
+ {Target: "parameters.BlobServicePropertiesProperties.ChangeFeed.RetentionInDays", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil},
+ }},
+ }},
+ {Target: "parameters.BlobServicePropertiesProperties.RestorePolicy", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.RestorePolicy.Enabled", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.BlobServicePropertiesProperties.RestorePolicy.Days", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.RestorePolicy.Days", Name: validation.InclusiveMaximum, Rule: int64(365), Chain: nil},
+ {Target: "parameters.BlobServicePropertiesProperties.RestorePolicy.Days", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil},
+ }},
+ }},
+ {Target: "parameters.BlobServicePropertiesProperties.ContainerDeleteRetentionPolicy", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.ContainerDeleteRetentionPolicy.Days", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.ContainerDeleteRetentionPolicy.Days", Name: validation.InclusiveMaximum, Rule: int64(365), Chain: nil},
+ {Target: "parameters.BlobServicePropertiesProperties.ContainerDeleteRetentionPolicy.Days", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil},
+ }},
+ }},
+ {Target: "parameters.BlobServicePropertiesProperties.LastAccessTimeTrackingPolicy", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.LastAccessTimeTrackingPolicy.Enable", Name: validation.Null, Rule: true, Chain: nil}}},
+ }}}}}); err != nil {
+ return result, validation.NewError("storage.BlobServicesClient", "SetServiceProperties", err.Error())
+ }
+
+ req, err := client.SetServicePropertiesPreparer(ctx, resourceGroupName, accountName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "SetServiceProperties", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.SetServicePropertiesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "SetServiceProperties", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.SetServicePropertiesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "SetServiceProperties", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// SetServicePropertiesPreparer prepares the SetServiceProperties request.
+func (client BlobServicesClient) SetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters BlobServiceProperties) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "BlobServicesName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ parameters.Sku = nil
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/{BlobServicesName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// SetServicePropertiesSender sends the SetServiceProperties request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobServicesClient) SetServicePropertiesSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// SetServicePropertiesResponder handles the response to the SetServiceProperties request. The method always
+// closes the http.Response Body.
+func (client BlobServicesClient) SetServicePropertiesResponder(resp *http.Response) (result BlobServiceProperties, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
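
For context, a minimal sketch of how the generated BlobServicesClient above is typically driven. It assumes the standard generated constructor NewBlobServicesClient (defined earlier in blobservices.go, outside this hunk) and an authorizer from the go-autorest auth package; the subscription ID, resource group, and account name are placeholders.

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// NewAuthorizerFromEnvironment picks up credentials from the standard
	// AZURE_* environment variables.
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		panic(err)
	}

	// "<subscription-id>" is a placeholder, not a real value.
	client := storage.NewBlobServicesClient("<subscription-id>")
	client.Authorizer = authorizer

	// GetServiceProperties performs the prepare/send/respond round trip
	// shown in the hunk above; "example-rg" and "examplestorageacct" are
	// placeholder names.
	props, err := client.GetServiceProperties(context.Background(), "example-rg", "examplestorageacct")
	if err != nil {
		panic(err)
	}
	fmt.Printf("HTTP status: %d\n", props.Response.Response.StatusCode)
}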
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/client.go
new file mode 100644
index 000000000000..dd8dcb74cf8e
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/client.go
@@ -0,0 +1,43 @@
+// Deprecated: Please note, this package has been deprecated. A replacement package is available [github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage). We strongly encourage you to upgrade to continue receiving updates. See [Migration Guide](https://aka.ms/azsdk/golang/t2/migration) for guidance on upgrading. Refer to our [deprecation policy](https://azure.github.io/azure-sdk/policies_support.html) for more details.
+//
+// Package storage implements the Azure ARM Storage service API version 2021-09-01.
+//
+// The Azure Storage Management API.
+package storage
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/Azure/go-autorest/autorest"
+)
+
+const (
+ // DefaultBaseURI is the default URI used for the Storage service
+ DefaultBaseURI = "https://management.azure.com"
+)
+
+// BaseClient is the base client for Storage.
+type BaseClient struct {
+ autorest.Client
+ BaseURI string
+ SubscriptionID string
+}
+
+// New creates an instance of the BaseClient client.
+func New(subscriptionID string) BaseClient {
+ return NewWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWithBaseURI creates an instance of the BaseClient client using a custom endpoint. Use this when interacting with
+// an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
+func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
+ return BaseClient{
+ Client: autorest.NewClientWithUserAgent(UserAgent()),
+ BaseURI: baseURI,
+ SubscriptionID: subscriptionID,
+ }
+}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/deletedaccounts.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/deletedaccounts.go
new file mode 100644
index 000000000000..a8abefe86263
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/deletedaccounts.go
@@ -0,0 +1,236 @@
+package storage
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// DeletedAccountsClient is the client for the Azure Storage Management API.
+type DeletedAccountsClient struct {
+ BaseClient
+}
+
+// NewDeletedAccountsClient creates an instance of the DeletedAccountsClient client.
+func NewDeletedAccountsClient(subscriptionID string) DeletedAccountsClient {
+ return NewDeletedAccountsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewDeletedAccountsClientWithBaseURI creates an instance of the DeletedAccountsClient client using a custom endpoint.
+// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
+func NewDeletedAccountsClientWithBaseURI(baseURI string, subscriptionID string) DeletedAccountsClient {
+ return DeletedAccountsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Get gets the properties of the specified deleted account resource.
+// Parameters:
+// deletedAccountName - name of the deleted storage account.
+// location - the location of the deleted storage account.
+func (client DeletedAccountsClient) Get(ctx context.Context, deletedAccountName string, location string) (result DeletedAccount, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DeletedAccountsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: deletedAccountName,
+ Constraints: []validation.Constraint{{Target: "deletedAccountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "deletedAccountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.DeletedAccountsClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, deletedAccountName, location)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "Get", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client DeletedAccountsClient) GetPreparer(ctx context.Context, deletedAccountName string, location string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "deletedAccountName": autorest.Encode("path", deletedAccountName),
+ "location": autorest.Encode("path", location),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/locations/{location}/deletedAccounts/{deletedAccountName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client DeletedAccountsClient) GetSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client DeletedAccountsClient) GetResponder(resp *http.Response) (result DeletedAccount, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List lists deleted accounts under the subscription.
+func (client DeletedAccountsClient) List(ctx context.Context) (result DeletedAccountListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DeletedAccountsClient.List")
+ defer func() {
+ sc := -1
+ if result.dalr.Response.Response != nil {
+ sc = result.dalr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.DeletedAccountsClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.dalr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.dalr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "List", resp, "Failure responding to request")
+ return
+ }
+ if result.dalr.hasNextLink() && result.dalr.IsEmpty() {
+ err = result.NextWithContext(ctx)
+ return
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client DeletedAccountsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/deletedAccounts", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client DeletedAccountsClient) ListSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client DeletedAccountsClient) ListResponder(resp *http.Response) (result DeletedAccountListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client DeletedAccountsClient) listNextResults(ctx context.Context, lastResults DeletedAccountListResult) (result DeletedAccountListResult, err error) {
+ req, err := lastResults.deletedAccountListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client DeletedAccountsClient) ListComplete(ctx context.Context) (result DeletedAccountListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DeletedAccountsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx)
+ return
+}
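
A sketch of consuming the paged List operation above through ListComplete, assuming the iterator follows the standard autorest NotDone/Value/NextWithContext pattern that these generated packages use; NextWithContext follows the nextLink across pages, the same plumbing listNextResults implements above.

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
)

// printDeletedAccounts walks every deleted account in the subscription,
// crossing page boundaries transparently via the iterator.
func printDeletedAccounts(ctx context.Context, client storage.DeletedAccountsClient) error {
	iter, err := client.ListComplete(ctx)
	if err != nil {
		return err
	}
	for iter.NotDone() {
		acct := iter.Value()
		if acct.Name != nil { // Name comes from the shared proxy-resource model
			fmt.Println(*acct.Name)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}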
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/encryptionscopes.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/encryptionscopes.go
new file mode 100644
index 000000000000..0a120d35646b
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/encryptionscopes.go
@@ -0,0 +1,469 @@
+package storage
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// EncryptionScopesClient is the client for the Azure Storage Management API.
+type EncryptionScopesClient struct {
+ BaseClient
+}
+
+// NewEncryptionScopesClient creates an instance of the EncryptionScopesClient client.
+func NewEncryptionScopesClient(subscriptionID string) EncryptionScopesClient {
+ return NewEncryptionScopesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewEncryptionScopesClientWithBaseURI creates an instance of the EncryptionScopesClient client using a custom
+// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure
+// stack).
+func NewEncryptionScopesClientWithBaseURI(baseURI string, subscriptionID string) EncryptionScopesClient {
+ return EncryptionScopesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Get returns the properties for the specified encryption scope.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// encryptionScopeName - the name of the encryption scope within the specified storage account. Encryption
+// scope names must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-)
+// only. Every dash (-) character must be immediately preceded and followed by a letter or number.
+func (client EncryptionScopesClient) Get(ctx context.Context, resourceGroupName string, accountName string, encryptionScopeName string) (result EncryptionScope, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/EncryptionScopesClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: encryptionScopeName,
+ Constraints: []validation.Constraint{{Target: "encryptionScopeName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "encryptionScopeName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.EncryptionScopesClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, accountName, encryptionScopeName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Get", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client EncryptionScopesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, encryptionScopeName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "encryptionScopeName": autorest.Encode("path", encryptionScopeName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/encryptionScopes/{encryptionScopeName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client EncryptionScopesClient) GetSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client EncryptionScopesClient) GetResponder(resp *http.Response) (result EncryptionScope, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List lists all the encryption scopes available under the specified storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client EncryptionScopesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result EncryptionScopeListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/EncryptionScopesClient.List")
+ defer func() {
+ sc := -1
+ if result.eslr.Response.Response != nil {
+ sc = result.eslr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.EncryptionScopesClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.eslr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.eslr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "List", resp, "Failure responding to request")
+ return
+ }
+ if result.eslr.hasNextLink() && result.eslr.IsEmpty() {
+ err = result.NextWithContext(ctx)
+ return
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client EncryptionScopesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/encryptionScopes", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client EncryptionScopesClient) ListSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client EncryptionScopesClient) ListResponder(resp *http.Response) (result EncryptionScopeListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client EncryptionScopesClient) listNextResults(ctx context.Context, lastResults EncryptionScopeListResult) (result EncryptionScopeListResult, err error) {
+ req, err := lastResults.encryptionScopeListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client EncryptionScopesClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string) (result EncryptionScopeListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/EncryptionScopesClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, resourceGroupName, accountName)
+ return
+}
+
+// Patch updates encryption scope properties as specified in the request body. The update fails if the specified
+// encryption scope does not already exist.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// encryptionScopeName - the name of the encryption scope within the specified storage account. Encryption
+// scope names must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-)
+// only. Every dash (-) character must be immediately preceded and followed by a letter or number.
+// encryptionScope - encryption scope properties to be used for the update.
+func (client EncryptionScopesClient) Patch(ctx context.Context, resourceGroupName string, accountName string, encryptionScopeName string, encryptionScope EncryptionScope) (result EncryptionScope, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/EncryptionScopesClient.Patch")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: encryptionScopeName,
+ Constraints: []validation.Constraint{{Target: "encryptionScopeName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "encryptionScopeName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.EncryptionScopesClient", "Patch", err.Error())
+ }
+
+ req, err := client.PatchPreparer(ctx, resourceGroupName, accountName, encryptionScopeName, encryptionScope)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Patch", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.PatchSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Patch", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.PatchResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Patch", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// PatchPreparer prepares the Patch request.
+func (client EncryptionScopesClient) PatchPreparer(ctx context.Context, resourceGroupName string, accountName string, encryptionScopeName string, encryptionScope EncryptionScope) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "encryptionScopeName": autorest.Encode("path", encryptionScopeName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/encryptionScopes/{encryptionScopeName}", pathParameters),
+ autorest.WithJSON(encryptionScope),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// PatchSender sends the Patch request. The method will close the
+// http.Response Body if it receives an error.
+func (client EncryptionScopesClient) PatchSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// PatchResponder handles the response to the Patch request. The method always
+// closes the http.Response Body.
+func (client EncryptionScopesClient) PatchResponder(resp *http.Response) (result EncryptionScope, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Put synchronously creates or updates an encryption scope under the specified storage account. If an encryption scope
+// is already created and a subsequent request is issued with different properties, the encryption scope properties
+// will be updated per the specified request.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// encryptionScopeName - the name of the encryption scope within the specified storage account. Encryption
+// scope names must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-)
+// only. Every dash (-) character must be immediately preceded and followed by a letter or number.
+// encryptionScope - encryption scope properties to be used for the create or update.
+func (client EncryptionScopesClient) Put(ctx context.Context, resourceGroupName string, accountName string, encryptionScopeName string, encryptionScope EncryptionScope) (result EncryptionScope, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/EncryptionScopesClient.Put")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: encryptionScopeName,
+ Constraints: []validation.Constraint{{Target: "encryptionScopeName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "encryptionScopeName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.EncryptionScopesClient", "Put", err.Error())
+ }
+
+ req, err := client.PutPreparer(ctx, resourceGroupName, accountName, encryptionScopeName, encryptionScope)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Put", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.PutSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Put", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.PutResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Put", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// PutPreparer prepares the Put request.
+func (client EncryptionScopesClient) PutPreparer(ctx context.Context, resourceGroupName string, accountName string, encryptionScopeName string, encryptionScope EncryptionScope) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "encryptionScopeName": autorest.Encode("path", encryptionScopeName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/encryptionScopes/{encryptionScopeName}", pathParameters),
+ autorest.WithJSON(encryptionScope),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// PutSender sends the Put request. The method will close the
+// http.Response Body if it receives an error.
+func (client EncryptionScopesClient) PutSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// PutResponder handles the response to the Put request. The method always
+// closes the http.Response Body.
+func (client EncryptionScopesClient) PutResponder(resp *http.Response) (result EncryptionScope, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
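
A sketch of creating a Microsoft.Storage-managed encryption scope through the Put method above. It assumes the EncryptionScope model embeds *EncryptionScopeProperties, as the generated models file (not part of this diff slice) defines it; the two constants are grounded in the enums.go hunk below, and the resource group, account, and scope names are placeholders.

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
)

// createScope creates (or idempotently updates) an encryption scope whose
// key material is managed by Microsoft.Storage.
func createScope(ctx context.Context, client storage.EncryptionScopesClient) error {
	scope := storage.EncryptionScope{
		EncryptionScopeProperties: &storage.EncryptionScopeProperties{
			Source: storage.EncryptionScopeSourceMicrosoftStorage,
			State:  storage.EncryptionScopeStateEnabled,
		},
	}
	result, err := client.Put(ctx, "example-rg", "examplestorageacct", "scope1", scope)
	if err != nil {
		return err
	}
	fmt.Printf("HTTP status: %d\n", result.Response.Response.StatusCode)
	return nil
}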
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/enums.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/enums.go
new file mode 100644
index 000000000000..bba1ff6410cc
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/enums.go
@@ -0,0 +1,1018 @@
+package storage
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+// AccessTier enumerates the values for access tier.
+type AccessTier string
+
+const (
+ // AccessTierCool ...
+ AccessTierCool AccessTier = "Cool"
+ // AccessTierHot ...
+ AccessTierHot AccessTier = "Hot"
+ // AccessTierPremium ...
+ AccessTierPremium AccessTier = "Premium"
+)
+
+// PossibleAccessTierValues returns an array of possible values for the AccessTier const type.
+func PossibleAccessTierValues() []AccessTier {
+ return []AccessTier{AccessTierCool, AccessTierHot, AccessTierPremium}
+}
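
As a brief aside, a sketch of how callers typically use these generated Possible*Values helpers: validating a free-form string against the constant set, here for AccessTier.

package main

import "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"

// isValidAccessTier reports whether s matches one of the generated
// AccessTier constants.
func isValidAccessTier(s string) bool {
	for _, tier := range storage.PossibleAccessTierValues() {
		if string(tier) == s {
			return true
		}
	}
	return false
}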
+
+// AccountExpand enumerates the values for account expand.
+type AccountExpand string
+
+const (
+ // AccountExpandBlobRestoreStatus ...
+ AccountExpandBlobRestoreStatus AccountExpand = "blobRestoreStatus"
+ // AccountExpandGeoReplicationStats ...
+ AccountExpandGeoReplicationStats AccountExpand = "geoReplicationStats"
+)
+
+// PossibleAccountExpandValues returns an array of possible values for the AccountExpand const type.
+func PossibleAccountExpandValues() []AccountExpand {
+ return []AccountExpand{AccountExpandBlobRestoreStatus, AccountExpandGeoReplicationStats}
+}
+
+// AccountImmutabilityPolicyState enumerates the values for account immutability policy state.
+type AccountImmutabilityPolicyState string
+
+const (
+ // AccountImmutabilityPolicyStateDisabled ...
+ AccountImmutabilityPolicyStateDisabled AccountImmutabilityPolicyState = "Disabled"
+ // AccountImmutabilityPolicyStateLocked ...
+ AccountImmutabilityPolicyStateLocked AccountImmutabilityPolicyState = "Locked"
+ // AccountImmutabilityPolicyStateUnlocked ...
+ AccountImmutabilityPolicyStateUnlocked AccountImmutabilityPolicyState = "Unlocked"
+)
+
+// PossibleAccountImmutabilityPolicyStateValues returns an array of possible values for the AccountImmutabilityPolicyState const type.
+func PossibleAccountImmutabilityPolicyStateValues() []AccountImmutabilityPolicyState {
+ return []AccountImmutabilityPolicyState{AccountImmutabilityPolicyStateDisabled, AccountImmutabilityPolicyStateLocked, AccountImmutabilityPolicyStateUnlocked}
+}
+
+// AccountStatus enumerates the values for account status.
+type AccountStatus string
+
+const (
+ // AccountStatusAvailable ...
+ AccountStatusAvailable AccountStatus = "available"
+ // AccountStatusUnavailable ...
+ AccountStatusUnavailable AccountStatus = "unavailable"
+)
+
+// PossibleAccountStatusValues returns an array of possible values for the AccountStatus const type.
+func PossibleAccountStatusValues() []AccountStatus {
+ return []AccountStatus{AccountStatusAvailable, AccountStatusUnavailable}
+}
+
+// AccountType enumerates the values for account type.
+type AccountType string
+
+const (
+ // AccountTypeComputer ...
+ AccountTypeComputer AccountType = "Computer"
+ // AccountTypeUser ...
+ AccountTypeUser AccountType = "User"
+)
+
+// PossibleAccountTypeValues returns an array of possible values for the AccountType const type.
+func PossibleAccountTypeValues() []AccountType {
+ return []AccountType{AccountTypeComputer, AccountTypeUser}
+}
+
+// Action enumerates the values for action.
+type Action string
+
+const (
+ // ActionAllow ...
+ ActionAllow Action = "Allow"
+)
+
+// PossibleActionValues returns an array of possible values for the Action const type.
+func PossibleActionValues() []Action {
+ return []Action{ActionAllow}
+}
+
+// Action1 enumerates the values for action 1.
+type Action1 string
+
+const (
+ // Action1Acquire ...
+ Action1Acquire Action1 = "Acquire"
+ // Action1Break ...
+ Action1Break Action1 = "Break"
+ // Action1Change ...
+ Action1Change Action1 = "Change"
+ // Action1Release ...
+ Action1Release Action1 = "Release"
+ // Action1Renew ...
+ Action1Renew Action1 = "Renew"
+)
+
+// PossibleAction1Values returns an array of possible values for the Action1 const type.
+func PossibleAction1Values() []Action1 {
+ return []Action1{Action1Acquire, Action1Break, Action1Change, Action1Release, Action1Renew}
+}
+
+// AllowedCopyScope enumerates the values for allowed copy scope.
+type AllowedCopyScope string
+
+const (
+ // AllowedCopyScopeAAD ...
+ AllowedCopyScopeAAD AllowedCopyScope = "AAD"
+ // AllowedCopyScopePrivateLink ...
+ AllowedCopyScopePrivateLink AllowedCopyScope = "PrivateLink"
+)
+
+// PossibleAllowedCopyScopeValues returns an array of possible values for the AllowedCopyScope const type.
+func PossibleAllowedCopyScopeValues() []AllowedCopyScope {
+ return []AllowedCopyScope{AllowedCopyScopeAAD, AllowedCopyScopePrivateLink}
+}
+
+// BlobRestoreProgressStatus enumerates the values for blob restore progress status.
+type BlobRestoreProgressStatus string
+
+const (
+ // BlobRestoreProgressStatusComplete ...
+ BlobRestoreProgressStatusComplete BlobRestoreProgressStatus = "Complete"
+ // BlobRestoreProgressStatusFailed ...
+ BlobRestoreProgressStatusFailed BlobRestoreProgressStatus = "Failed"
+ // BlobRestoreProgressStatusInProgress ...
+ BlobRestoreProgressStatusInProgress BlobRestoreProgressStatus = "InProgress"
+)
+
+// PossibleBlobRestoreProgressStatusValues returns an array of possible values for the BlobRestoreProgressStatus const type.
+func PossibleBlobRestoreProgressStatusValues() []BlobRestoreProgressStatus {
+ return []BlobRestoreProgressStatus{BlobRestoreProgressStatusComplete, BlobRestoreProgressStatusFailed, BlobRestoreProgressStatusInProgress}
+}
+
+// Bypass enumerates the values for bypass.
+type Bypass string
+
+const (
+ // BypassAzureServices ...
+ BypassAzureServices Bypass = "AzureServices"
+ // BypassLogging ...
+ BypassLogging Bypass = "Logging"
+ // BypassMetrics ...
+ BypassMetrics Bypass = "Metrics"
+ // BypassNone ...
+ BypassNone Bypass = "None"
+)
+
+// PossibleBypassValues returns an array of possible values for the Bypass const type.
+func PossibleBypassValues() []Bypass {
+ return []Bypass{BypassAzureServices, BypassLogging, BypassMetrics, BypassNone}
+}
+
+// CreatedByType enumerates the values for created by type.
+type CreatedByType string
+
+const (
+ // CreatedByTypeApplication ...
+ CreatedByTypeApplication CreatedByType = "Application"
+ // CreatedByTypeKey ...
+ CreatedByTypeKey CreatedByType = "Key"
+ // CreatedByTypeManagedIdentity ...
+ CreatedByTypeManagedIdentity CreatedByType = "ManagedIdentity"
+ // CreatedByTypeUser ...
+ CreatedByTypeUser CreatedByType = "User"
+)
+
+// PossibleCreatedByTypeValues returns an array of possible values for the CreatedByType const type.
+func PossibleCreatedByTypeValues() []CreatedByType {
+ return []CreatedByType{CreatedByTypeApplication, CreatedByTypeKey, CreatedByTypeManagedIdentity, CreatedByTypeUser}
+}
+
+// DefaultAction enumerates the values for default action.
+type DefaultAction string
+
+const (
+ // DefaultActionAllow ...
+ DefaultActionAllow DefaultAction = "Allow"
+ // DefaultActionDeny ...
+ DefaultActionDeny DefaultAction = "Deny"
+)
+
+// PossibleDefaultActionValues returns an array of possible values for the DefaultAction const type.
+func PossibleDefaultActionValues() []DefaultAction {
+ return []DefaultAction{DefaultActionAllow, DefaultActionDeny}
+}
+
+// DefaultSharePermission enumerates the values for default share permission.
+type DefaultSharePermission string
+
+const (
+ // DefaultSharePermissionNone ...
+ DefaultSharePermissionNone DefaultSharePermission = "None"
+ // DefaultSharePermissionStorageFileDataSmbShareContributor ...
+ DefaultSharePermissionStorageFileDataSmbShareContributor DefaultSharePermission = "StorageFileDataSmbShareContributor"
+ // DefaultSharePermissionStorageFileDataSmbShareElevatedContributor ...
+ DefaultSharePermissionStorageFileDataSmbShareElevatedContributor DefaultSharePermission = "StorageFileDataSmbShareElevatedContributor"
+ // DefaultSharePermissionStorageFileDataSmbShareReader ...
+ DefaultSharePermissionStorageFileDataSmbShareReader DefaultSharePermission = "StorageFileDataSmbShareReader"
+)
+
+// PossibleDefaultSharePermissionValues returns an array of possible values for the DefaultSharePermission const type.
+func PossibleDefaultSharePermissionValues() []DefaultSharePermission {
+ return []DefaultSharePermission{DefaultSharePermissionNone, DefaultSharePermissionStorageFileDataSmbShareContributor, DefaultSharePermissionStorageFileDataSmbShareElevatedContributor, DefaultSharePermissionStorageFileDataSmbShareReader}
+}
+
+// DirectoryServiceOptions enumerates the values for directory service options.
+type DirectoryServiceOptions string
+
+const (
+ // DirectoryServiceOptionsAADDS ...
+ DirectoryServiceOptionsAADDS DirectoryServiceOptions = "AADDS"
+ // DirectoryServiceOptionsAD ...
+ DirectoryServiceOptionsAD DirectoryServiceOptions = "AD"
+ // DirectoryServiceOptionsNone ...
+ DirectoryServiceOptionsNone DirectoryServiceOptions = "None"
+)
+
+// PossibleDirectoryServiceOptionsValues returns an array of possible values for the DirectoryServiceOptions const type.
+func PossibleDirectoryServiceOptionsValues() []DirectoryServiceOptions {
+ return []DirectoryServiceOptions{DirectoryServiceOptionsAADDS, DirectoryServiceOptionsAD, DirectoryServiceOptionsNone}
+}
+
+// DNSEndpointType enumerates the values for dns endpoint type.
+type DNSEndpointType string
+
+const (
+ // DNSEndpointTypeAzureDNSZone ...
+ DNSEndpointTypeAzureDNSZone DNSEndpointType = "AzureDnsZone"
+ // DNSEndpointTypeStandard ...
+ DNSEndpointTypeStandard DNSEndpointType = "Standard"
+)
+
+// PossibleDNSEndpointTypeValues returns an array of possible values for the DNSEndpointType const type.
+func PossibleDNSEndpointTypeValues() []DNSEndpointType {
+ return []DNSEndpointType{DNSEndpointTypeAzureDNSZone, DNSEndpointTypeStandard}
+}
+
+// EnabledProtocols enumerates the values for enabled protocols.
+type EnabledProtocols string
+
+const (
+ // EnabledProtocolsNFS ...
+ EnabledProtocolsNFS EnabledProtocols = "NFS"
+ // EnabledProtocolsSMB ...
+ EnabledProtocolsSMB EnabledProtocols = "SMB"
+)
+
+// PossibleEnabledProtocolsValues returns an array of possible values for the EnabledProtocols const type.
+func PossibleEnabledProtocolsValues() []EnabledProtocols {
+ return []EnabledProtocols{EnabledProtocolsNFS, EnabledProtocolsSMB}
+}
+
+// EncryptionScopeSource enumerates the values for encryption scope source.
+type EncryptionScopeSource string
+
+const (
+ // EncryptionScopeSourceMicrosoftKeyVault ...
+ EncryptionScopeSourceMicrosoftKeyVault EncryptionScopeSource = "Microsoft.KeyVault"
+ // EncryptionScopeSourceMicrosoftStorage ...
+ EncryptionScopeSourceMicrosoftStorage EncryptionScopeSource = "Microsoft.Storage"
+)
+
+// PossibleEncryptionScopeSourceValues returns an array of possible values for the EncryptionScopeSource const type.
+func PossibleEncryptionScopeSourceValues() []EncryptionScopeSource {
+ return []EncryptionScopeSource{EncryptionScopeSourceMicrosoftKeyVault, EncryptionScopeSourceMicrosoftStorage}
+}
+
+// EncryptionScopeState enumerates the values for encryption scope state.
+type EncryptionScopeState string
+
+const (
+ // EncryptionScopeStateDisabled ...
+ EncryptionScopeStateDisabled EncryptionScopeState = "Disabled"
+ // EncryptionScopeStateEnabled ...
+ EncryptionScopeStateEnabled EncryptionScopeState = "Enabled"
+)
+
+// PossibleEncryptionScopeStateValues returns an array of possible values for the EncryptionScopeState const type.
+func PossibleEncryptionScopeStateValues() []EncryptionScopeState {
+ return []EncryptionScopeState{EncryptionScopeStateDisabled, EncryptionScopeStateEnabled}
+}
+
+// ExtendedLocationTypes enumerates the values for extended location types.
+type ExtendedLocationTypes string
+
+const (
+ // ExtendedLocationTypesEdgeZone ...
+ ExtendedLocationTypesEdgeZone ExtendedLocationTypes = "EdgeZone"
+)
+
+// PossibleExtendedLocationTypesValues returns an array of possible values for the ExtendedLocationTypes const type.
+func PossibleExtendedLocationTypesValues() []ExtendedLocationTypes {
+ return []ExtendedLocationTypes{ExtendedLocationTypesEdgeZone}
+}
+
+// Format enumerates the values for format.
+type Format string
+
+const (
+ // FormatCsv ...
+ FormatCsv Format = "Csv"
+ // FormatParquet ...
+ FormatParquet Format = "Parquet"
+)
+
+// PossibleFormatValues returns an array of possible values for the Format const type.
+func PossibleFormatValues() []Format {
+ return []Format{FormatCsv, FormatParquet}
+}
+
+// GeoReplicationStatus enumerates the values for geo replication status.
+type GeoReplicationStatus string
+
+const (
+ // GeoReplicationStatusBootstrap ...
+ GeoReplicationStatusBootstrap GeoReplicationStatus = "Bootstrap"
+ // GeoReplicationStatusLive ...
+ GeoReplicationStatusLive GeoReplicationStatus = "Live"
+ // GeoReplicationStatusUnavailable ...
+ GeoReplicationStatusUnavailable GeoReplicationStatus = "Unavailable"
+)
+
+// PossibleGeoReplicationStatusValues returns an array of possible values for the GeoReplicationStatus const type.
+func PossibleGeoReplicationStatusValues() []GeoReplicationStatus {
+ return []GeoReplicationStatus{GeoReplicationStatusBootstrap, GeoReplicationStatusLive, GeoReplicationStatusUnavailable}
+}
+
+// HTTPProtocol enumerates the values for http protocol.
+type HTTPProtocol string
+
+const (
+ // HTTPProtocolHTTPS ...
+ HTTPProtocolHTTPS HTTPProtocol = "https"
+ // HTTPProtocolHttpshttp ...
+ HTTPProtocolHttpshttp HTTPProtocol = "https,http"
+)
+
+// PossibleHTTPProtocolValues returns an array of possible values for the HTTPProtocol const type.
+func PossibleHTTPProtocolValues() []HTTPProtocol {
+ return []HTTPProtocol{HTTPProtocolHTTPS, HTTPProtocolHttpshttp}
+}
+
+// IdentityType enumerates the values for identity type.
+type IdentityType string
+
+const (
+ // IdentityTypeNone ...
+ IdentityTypeNone IdentityType = "None"
+ // IdentityTypeSystemAssigned ...
+ IdentityTypeSystemAssigned IdentityType = "SystemAssigned"
+ // IdentityTypeSystemAssignedUserAssigned ...
+ IdentityTypeSystemAssignedUserAssigned IdentityType = "SystemAssigned,UserAssigned"
+ // IdentityTypeUserAssigned ...
+ IdentityTypeUserAssigned IdentityType = "UserAssigned"
+)
+
+// PossibleIdentityTypeValues returns an array of possible values for the IdentityType const type.
+func PossibleIdentityTypeValues() []IdentityType {
+ return []IdentityType{IdentityTypeNone, IdentityTypeSystemAssigned, IdentityTypeSystemAssignedUserAssigned, IdentityTypeUserAssigned}
+}
+
+// ImmutabilityPolicyState enumerates the values for immutability policy state.
+type ImmutabilityPolicyState string
+
+const (
+ // ImmutabilityPolicyStateLocked ...
+ ImmutabilityPolicyStateLocked ImmutabilityPolicyState = "Locked"
+ // ImmutabilityPolicyStateUnlocked ...
+ ImmutabilityPolicyStateUnlocked ImmutabilityPolicyState = "Unlocked"
+)
+
+// PossibleImmutabilityPolicyStateValues returns an array of possible values for the ImmutabilityPolicyState const type.
+func PossibleImmutabilityPolicyStateValues() []ImmutabilityPolicyState {
+ return []ImmutabilityPolicyState{ImmutabilityPolicyStateLocked, ImmutabilityPolicyStateUnlocked}
+}
+
+// ImmutabilityPolicyUpdateType enumerates the values for immutability policy update type.
+type ImmutabilityPolicyUpdateType string
+
+const (
+ // ImmutabilityPolicyUpdateTypeExtend ...
+ ImmutabilityPolicyUpdateTypeExtend ImmutabilityPolicyUpdateType = "extend"
+ // ImmutabilityPolicyUpdateTypeLock ...
+ ImmutabilityPolicyUpdateTypeLock ImmutabilityPolicyUpdateType = "lock"
+ // ImmutabilityPolicyUpdateTypePut ...
+ ImmutabilityPolicyUpdateTypePut ImmutabilityPolicyUpdateType = "put"
+)
+
+// PossibleImmutabilityPolicyUpdateTypeValues returns an array of possible values for the ImmutabilityPolicyUpdateType const type.
+func PossibleImmutabilityPolicyUpdateTypeValues() []ImmutabilityPolicyUpdateType {
+ return []ImmutabilityPolicyUpdateType{ImmutabilityPolicyUpdateTypeExtend, ImmutabilityPolicyUpdateTypeLock, ImmutabilityPolicyUpdateTypePut}
+}
+
+// KeyPermission enumerates the values for key permission.
+type KeyPermission string
+
+const (
+ // KeyPermissionFull ...
+ KeyPermissionFull KeyPermission = "Full"
+ // KeyPermissionRead ...
+ KeyPermissionRead KeyPermission = "Read"
+)
+
+// PossibleKeyPermissionValues returns an array of possible values for the KeyPermission const type.
+func PossibleKeyPermissionValues() []KeyPermission {
+ return []KeyPermission{KeyPermissionFull, KeyPermissionRead}
+}
+
+// KeySource enumerates the values for key source.
+type KeySource string
+
+const (
+ // KeySourceMicrosoftKeyvault ...
+ KeySourceMicrosoftKeyvault KeySource = "Microsoft.Keyvault"
+ // KeySourceMicrosoftStorage ...
+ KeySourceMicrosoftStorage KeySource = "Microsoft.Storage"
+)
+
+// PossibleKeySourceValues returns an array of possible values for the KeySource const type.
+func PossibleKeySourceValues() []KeySource {
+ return []KeySource{KeySourceMicrosoftKeyvault, KeySourceMicrosoftStorage}
+}
+
+// KeyType enumerates the values for key type.
+type KeyType string
+
+const (
+ // KeyTypeAccount ...
+ KeyTypeAccount KeyType = "Account"
+ // KeyTypeService ...
+ KeyTypeService KeyType = "Service"
+)
+
+// PossibleKeyTypeValues returns an array of possible values for the KeyType const type.
+func PossibleKeyTypeValues() []KeyType {
+ return []KeyType{KeyTypeAccount, KeyTypeService}
+}
+
+// Kind enumerates the values for kind.
+type Kind string
+
+const (
+ // KindBlobStorage ...
+ KindBlobStorage Kind = "BlobStorage"
+ // KindBlockBlobStorage ...
+ KindBlockBlobStorage Kind = "BlockBlobStorage"
+ // KindFileStorage ...
+ KindFileStorage Kind = "FileStorage"
+ // KindStorage ...
+ KindStorage Kind = "Storage"
+ // KindStorageV2 ...
+ KindStorageV2 Kind = "StorageV2"
+)
+
+// PossibleKindValues returns an array of possible values for the Kind const type.
+func PossibleKindValues() []Kind {
+ return []Kind{KindBlobStorage, KindBlockBlobStorage, KindFileStorage, KindStorage, KindStorageV2}
+}
+
+// LargeFileSharesState enumerates the values for large file shares state.
+type LargeFileSharesState string
+
+const (
+ // LargeFileSharesStateDisabled ...
+ LargeFileSharesStateDisabled LargeFileSharesState = "Disabled"
+ // LargeFileSharesStateEnabled ...
+ LargeFileSharesStateEnabled LargeFileSharesState = "Enabled"
+)
+
+// PossibleLargeFileSharesStateValues returns an array of possible values for the LargeFileSharesState const type.
+func PossibleLargeFileSharesStateValues() []LargeFileSharesState {
+ return []LargeFileSharesState{LargeFileSharesStateDisabled, LargeFileSharesStateEnabled}
+}
+
+// LeaseDuration enumerates the values for lease duration.
+type LeaseDuration string
+
+const (
+ // LeaseDurationFixed ...
+ LeaseDurationFixed LeaseDuration = "Fixed"
+ // LeaseDurationInfinite ...
+ LeaseDurationInfinite LeaseDuration = "Infinite"
+)
+
+// PossibleLeaseDurationValues returns an array of possible values for the LeaseDuration const type.
+func PossibleLeaseDurationValues() []LeaseDuration {
+ return []LeaseDuration{LeaseDurationFixed, LeaseDurationInfinite}
+}
+
+// LeaseShareAction enumerates the values for lease share action.
+type LeaseShareAction string
+
+const (
+ // LeaseShareActionAcquire ...
+ LeaseShareActionAcquire LeaseShareAction = "Acquire"
+ // LeaseShareActionBreak ...
+ LeaseShareActionBreak LeaseShareAction = "Break"
+ // LeaseShareActionChange ...
+ LeaseShareActionChange LeaseShareAction = "Change"
+ // LeaseShareActionRelease ...
+ LeaseShareActionRelease LeaseShareAction = "Release"
+ // LeaseShareActionRenew ...
+ LeaseShareActionRenew LeaseShareAction = "Renew"
+)
+
+// PossibleLeaseShareActionValues returns an array of possible values for the LeaseShareAction const type.
+func PossibleLeaseShareActionValues() []LeaseShareAction {
+ return []LeaseShareAction{LeaseShareActionAcquire, LeaseShareActionBreak, LeaseShareActionChange, LeaseShareActionRelease, LeaseShareActionRenew}
+}
+
+// LeaseState enumerates the values for lease state.
+type LeaseState string
+
+const (
+ // LeaseStateAvailable ...
+ LeaseStateAvailable LeaseState = "Available"
+ // LeaseStateBreaking ...
+ LeaseStateBreaking LeaseState = "Breaking"
+ // LeaseStateBroken ...
+ LeaseStateBroken LeaseState = "Broken"
+ // LeaseStateExpired ...
+ LeaseStateExpired LeaseState = "Expired"
+ // LeaseStateLeased ...
+ LeaseStateLeased LeaseState = "Leased"
+)
+
+// PossibleLeaseStateValues returns an array of possible values for the LeaseState const type.
+func PossibleLeaseStateValues() []LeaseState {
+ return []LeaseState{LeaseStateAvailable, LeaseStateBreaking, LeaseStateBroken, LeaseStateExpired, LeaseStateLeased}
+}
+
+// LeaseStatus enumerates the values for lease status.
+type LeaseStatus string
+
+const (
+ // LeaseStatusLocked ...
+ LeaseStatusLocked LeaseStatus = "Locked"
+ // LeaseStatusUnlocked ...
+ LeaseStatusUnlocked LeaseStatus = "Unlocked"
+)
+
+// PossibleLeaseStatusValues returns an array of possible values for the LeaseStatus const type.
+func PossibleLeaseStatusValues() []LeaseStatus {
+ return []LeaseStatus{LeaseStatusLocked, LeaseStatusUnlocked}
+}
+
+// ListContainersInclude enumerates the values for list containers include.
+type ListContainersInclude string
+
+const (
+ // ListContainersIncludeDeleted ...
+ ListContainersIncludeDeleted ListContainersInclude = "deleted"
+)
+
+// PossibleListContainersIncludeValues returns an array of possible values for the ListContainersInclude const type.
+func PossibleListContainersIncludeValues() []ListContainersInclude {
+ return []ListContainersInclude{ListContainersIncludeDeleted}
+}
+
+// ListKeyExpand enumerates the values for list key expand.
+type ListKeyExpand string
+
+const (
+ // ListKeyExpandKerb ...
+ ListKeyExpandKerb ListKeyExpand = "kerb"
+)
+
+// PossibleListKeyExpandValues returns an array of possible values for the ListKeyExpand const type.
+func PossibleListKeyExpandValues() []ListKeyExpand {
+ return []ListKeyExpand{ListKeyExpandKerb}
+}
+
+// MigrationState enumerates the values for migration state.
+type MigrationState string
+
+const (
+ // MigrationStateCompleted ...
+ MigrationStateCompleted MigrationState = "Completed"
+ // MigrationStateInProgress ...
+ MigrationStateInProgress MigrationState = "InProgress"
+)
+
+// PossibleMigrationStateValues returns an array of possible values for the MigrationState const type.
+func PossibleMigrationStateValues() []MigrationState {
+ return []MigrationState{MigrationStateCompleted, MigrationStateInProgress}
+}
+
+// MinimumTLSVersion enumerates the values for minimum tls version.
+type MinimumTLSVersion string
+
+const (
+ // MinimumTLSVersionTLS10 ...
+ MinimumTLSVersionTLS10 MinimumTLSVersion = "TLS1_0"
+ // MinimumTLSVersionTLS11 ...
+ MinimumTLSVersionTLS11 MinimumTLSVersion = "TLS1_1"
+ // MinimumTLSVersionTLS12 ...
+ MinimumTLSVersionTLS12 MinimumTLSVersion = "TLS1_2"
+)
+
+// PossibleMinimumTLSVersionValues returns an array of possible values for the MinimumTLSVersion const type.
+func PossibleMinimumTLSVersionValues() []MinimumTLSVersion {
+ return []MinimumTLSVersion{MinimumTLSVersionTLS10, MinimumTLSVersionTLS11, MinimumTLSVersionTLS12}
+}
+
+// Name enumerates the values for name.
+type Name string
+
+const (
+ // NameAccessTimeTracking ...
+ NameAccessTimeTracking Name = "AccessTimeTracking"
+)
+
+// PossibleNameValues returns an array of possible values for the Name const type.
+func PossibleNameValues() []Name {
+ return []Name{NameAccessTimeTracking}
+}
+
+// ObjectType enumerates the values for object type.
+type ObjectType string
+
+const (
+ // ObjectTypeBlob ...
+ ObjectTypeBlob ObjectType = "Blob"
+ // ObjectTypeContainer ...
+ ObjectTypeContainer ObjectType = "Container"
+)
+
+// PossibleObjectTypeValues returns an array of possible values for the ObjectType const type.
+func PossibleObjectTypeValues() []ObjectType {
+ return []ObjectType{ObjectTypeBlob, ObjectTypeContainer}
+}
+
+// Permissions enumerates the values for permissions.
+type Permissions string
+
+const (
+ // PermissionsA ...
+ PermissionsA Permissions = "a"
+ // PermissionsC ...
+ PermissionsC Permissions = "c"
+ // PermissionsD ...
+ PermissionsD Permissions = "d"
+ // PermissionsL ...
+ PermissionsL Permissions = "l"
+ // PermissionsP ...
+ PermissionsP Permissions = "p"
+ // PermissionsR ...
+ PermissionsR Permissions = "r"
+ // PermissionsU ...
+ PermissionsU Permissions = "u"
+ // PermissionsW ...
+ PermissionsW Permissions = "w"
+)
+
+// PossiblePermissionsValues returns an array of possible values for the Permissions const type.
+func PossiblePermissionsValues() []Permissions {
+ return []Permissions{PermissionsA, PermissionsC, PermissionsD, PermissionsL, PermissionsP, PermissionsR, PermissionsU, PermissionsW}
+}
+
+// PrivateEndpointConnectionProvisioningState enumerates the values for private endpoint connection
+// provisioning state.
+type PrivateEndpointConnectionProvisioningState string
+
+const (
+ // PrivateEndpointConnectionProvisioningStateCreating ...
+ PrivateEndpointConnectionProvisioningStateCreating PrivateEndpointConnectionProvisioningState = "Creating"
+ // PrivateEndpointConnectionProvisioningStateDeleting ...
+ PrivateEndpointConnectionProvisioningStateDeleting PrivateEndpointConnectionProvisioningState = "Deleting"
+ // PrivateEndpointConnectionProvisioningStateFailed ...
+ PrivateEndpointConnectionProvisioningStateFailed PrivateEndpointConnectionProvisioningState = "Failed"
+ // PrivateEndpointConnectionProvisioningStateSucceeded ...
+ PrivateEndpointConnectionProvisioningStateSucceeded PrivateEndpointConnectionProvisioningState = "Succeeded"
+)
+
+// PossiblePrivateEndpointConnectionProvisioningStateValues returns an array of possible values for the PrivateEndpointConnectionProvisioningState const type.
+func PossiblePrivateEndpointConnectionProvisioningStateValues() []PrivateEndpointConnectionProvisioningState {
+ return []PrivateEndpointConnectionProvisioningState{PrivateEndpointConnectionProvisioningStateCreating, PrivateEndpointConnectionProvisioningStateDeleting, PrivateEndpointConnectionProvisioningStateFailed, PrivateEndpointConnectionProvisioningStateSucceeded}
+}
+
+// PrivateEndpointServiceConnectionStatus enumerates the values for private endpoint service connection status.
+type PrivateEndpointServiceConnectionStatus string
+
+const (
+ // PrivateEndpointServiceConnectionStatusApproved ...
+ PrivateEndpointServiceConnectionStatusApproved PrivateEndpointServiceConnectionStatus = "Approved"
+ // PrivateEndpointServiceConnectionStatusPending ...
+ PrivateEndpointServiceConnectionStatusPending PrivateEndpointServiceConnectionStatus = "Pending"
+ // PrivateEndpointServiceConnectionStatusRejected ...
+ PrivateEndpointServiceConnectionStatusRejected PrivateEndpointServiceConnectionStatus = "Rejected"
+)
+
+// PossiblePrivateEndpointServiceConnectionStatusValues returns an array of possible values for the PrivateEndpointServiceConnectionStatus const type.
+func PossiblePrivateEndpointServiceConnectionStatusValues() []PrivateEndpointServiceConnectionStatus {
+ return []PrivateEndpointServiceConnectionStatus{PrivateEndpointServiceConnectionStatusApproved, PrivateEndpointServiceConnectionStatusPending, PrivateEndpointServiceConnectionStatusRejected}
+}
+
+// ProvisioningState enumerates the values for provisioning state.
+type ProvisioningState string
+
+const (
+ // ProvisioningStateCreating ...
+ ProvisioningStateCreating ProvisioningState = "Creating"
+ // ProvisioningStateResolvingDNS ...
+ ProvisioningStateResolvingDNS ProvisioningState = "ResolvingDNS"
+ // ProvisioningStateSucceeded ...
+ ProvisioningStateSucceeded ProvisioningState = "Succeeded"
+)
+
+// PossibleProvisioningStateValues returns an array of possible values for the ProvisioningState const type.
+func PossibleProvisioningStateValues() []ProvisioningState {
+ return []ProvisioningState{ProvisioningStateCreating, ProvisioningStateResolvingDNS, ProvisioningStateSucceeded}
+}
+
+// PublicAccess enumerates the values for public access.
+type PublicAccess string
+
+const (
+ // PublicAccessBlob ...
+ PublicAccessBlob PublicAccess = "Blob"
+ // PublicAccessContainer ...
+ PublicAccessContainer PublicAccess = "Container"
+ // PublicAccessNone ...
+ PublicAccessNone PublicAccess = "None"
+)
+
+// PossiblePublicAccessValues returns an array of possible values for the PublicAccess const type.
+func PossiblePublicAccessValues() []PublicAccess {
+ return []PublicAccess{PublicAccessBlob, PublicAccessContainer, PublicAccessNone}
+}
+
+// PublicNetworkAccess enumerates the values for public network access.
+type PublicNetworkAccess string
+
+const (
+ // PublicNetworkAccessDisabled ...
+ PublicNetworkAccessDisabled PublicNetworkAccess = "Disabled"
+ // PublicNetworkAccessEnabled ...
+ PublicNetworkAccessEnabled PublicNetworkAccess = "Enabled"
+)
+
+// PossiblePublicNetworkAccessValues returns an array of possible values for the PublicNetworkAccess const type.
+func PossiblePublicNetworkAccessValues() []PublicNetworkAccess {
+ return []PublicNetworkAccess{PublicNetworkAccessDisabled, PublicNetworkAccessEnabled}
+}
+
+// Reason enumerates the values for reason.
+type Reason string
+
+const (
+ // ReasonAccountNameInvalid ...
+ ReasonAccountNameInvalid Reason = "AccountNameInvalid"
+ // ReasonAlreadyExists ...
+ ReasonAlreadyExists Reason = "AlreadyExists"
+)
+
+// PossibleReasonValues returns an array of possible values for the Reason const type.
+func PossibleReasonValues() []Reason {
+ return []Reason{ReasonAccountNameInvalid, ReasonAlreadyExists}
+}
+
+// ReasonCode enumerates the values for reason code.
+type ReasonCode string
+
+const (
+ // ReasonCodeNotAvailableForSubscription ...
+ ReasonCodeNotAvailableForSubscription ReasonCode = "NotAvailableForSubscription"
+ // ReasonCodeQuotaID ...
+ ReasonCodeQuotaID ReasonCode = "QuotaId"
+)
+
+// PossibleReasonCodeValues returns an array of possible values for the ReasonCode const type.
+func PossibleReasonCodeValues() []ReasonCode {
+ return []ReasonCode{ReasonCodeNotAvailableForSubscription, ReasonCodeQuotaID}
+}
+
+// RootSquashType enumerates the values for root squash type.
+type RootSquashType string
+
+const (
+ // RootSquashTypeAllSquash ...
+ RootSquashTypeAllSquash RootSquashType = "AllSquash"
+ // RootSquashTypeNoRootSquash ...
+ RootSquashTypeNoRootSquash RootSquashType = "NoRootSquash"
+ // RootSquashTypeRootSquash ...
+ RootSquashTypeRootSquash RootSquashType = "RootSquash"
+)
+
+// PossibleRootSquashTypeValues returns an array of possible values for the RootSquashType const type.
+func PossibleRootSquashTypeValues() []RootSquashType {
+ return []RootSquashType{RootSquashTypeAllSquash, RootSquashTypeNoRootSquash, RootSquashTypeRootSquash}
+}
+
+// RoutingChoice enumerates the values for routing choice.
+type RoutingChoice string
+
+const (
+ // RoutingChoiceInternetRouting ...
+ RoutingChoiceInternetRouting RoutingChoice = "InternetRouting"
+ // RoutingChoiceMicrosoftRouting ...
+ RoutingChoiceMicrosoftRouting RoutingChoice = "MicrosoftRouting"
+)
+
+// PossibleRoutingChoiceValues returns an array of possible values for the RoutingChoice const type.
+func PossibleRoutingChoiceValues() []RoutingChoice {
+ return []RoutingChoice{RoutingChoiceInternetRouting, RoutingChoiceMicrosoftRouting}
+}
+
+// Schedule enumerates the values for schedule.
+type Schedule string
+
+const (
+ // ScheduleDaily ...
+ ScheduleDaily Schedule = "Daily"
+ // ScheduleWeekly ...
+ ScheduleWeekly Schedule = "Weekly"
+)
+
+// PossibleScheduleValues returns an array of possible values for the Schedule const type.
+func PossibleScheduleValues() []Schedule {
+ return []Schedule{ScheduleDaily, ScheduleWeekly}
+}
+
+// Services enumerates the values for services.
+type Services string
+
+const (
+ // ServicesB ...
+ ServicesB Services = "b"
+ // ServicesF ...
+ ServicesF Services = "f"
+ // ServicesQ ...
+ ServicesQ Services = "q"
+ // ServicesT ...
+ ServicesT Services = "t"
+)
+
+// PossibleServicesValues returns an array of possible values for the Services const type.
+func PossibleServicesValues() []Services {
+ return []Services{ServicesB, ServicesF, ServicesQ, ServicesT}
+}
+
+// ShareAccessTier enumerates the values for share access tier.
+type ShareAccessTier string
+
+const (
+ // ShareAccessTierCool ...
+ ShareAccessTierCool ShareAccessTier = "Cool"
+ // ShareAccessTierHot ...
+ ShareAccessTierHot ShareAccessTier = "Hot"
+ // ShareAccessTierPremium ...
+ ShareAccessTierPremium ShareAccessTier = "Premium"
+ // ShareAccessTierTransactionOptimized ...
+ ShareAccessTierTransactionOptimized ShareAccessTier = "TransactionOptimized"
+)
+
+// PossibleShareAccessTierValues returns an array of possible values for the ShareAccessTier const type.
+func PossibleShareAccessTierValues() []ShareAccessTier {
+ return []ShareAccessTier{ShareAccessTierCool, ShareAccessTierHot, ShareAccessTierPremium, ShareAccessTierTransactionOptimized}
+}
+
+// SignedResource enumerates the values for signed resource.
+type SignedResource string
+
+const (
+ // SignedResourceB ...
+ SignedResourceB SignedResource = "b"
+ // SignedResourceC ...
+ SignedResourceC SignedResource = "c"
+ // SignedResourceF ...
+ SignedResourceF SignedResource = "f"
+ // SignedResourceS ...
+ SignedResourceS SignedResource = "s"
+)
+
+// PossibleSignedResourceValues returns an array of possible values for the SignedResource const type.
+func PossibleSignedResourceValues() []SignedResource {
+ return []SignedResource{SignedResourceB, SignedResourceC, SignedResourceF, SignedResourceS}
+}
+
+// SignedResourceTypes enumerates the values for signed resource types.
+type SignedResourceTypes string
+
+const (
+ // SignedResourceTypesC ...
+ SignedResourceTypesC SignedResourceTypes = "c"
+ // SignedResourceTypesO ...
+ SignedResourceTypesO SignedResourceTypes = "o"
+ // SignedResourceTypesS ...
+ SignedResourceTypesS SignedResourceTypes = "s"
+)
+
+// PossibleSignedResourceTypesValues returns an array of possible values for the SignedResourceTypes const type.
+func PossibleSignedResourceTypesValues() []SignedResourceTypes {
+ return []SignedResourceTypes{SignedResourceTypesC, SignedResourceTypesO, SignedResourceTypesS}
+}
+
+// SkuConversionStatus enumerates the values for sku conversion status.
+type SkuConversionStatus string
+
+const (
+ // SkuConversionStatusFailed ...
+ SkuConversionStatusFailed SkuConversionStatus = "Failed"
+ // SkuConversionStatusInProgress ...
+ SkuConversionStatusInProgress SkuConversionStatus = "InProgress"
+ // SkuConversionStatusSucceeded ...
+ SkuConversionStatusSucceeded SkuConversionStatus = "Succeeded"
+)
+
+// PossibleSkuConversionStatusValues returns an array of possible values for the SkuConversionStatus const type.
+func PossibleSkuConversionStatusValues() []SkuConversionStatus {
+ return []SkuConversionStatus{SkuConversionStatusFailed, SkuConversionStatusInProgress, SkuConversionStatusSucceeded}
+}
+
+// SkuName enumerates the values for sku name.
+type SkuName string
+
+const (
+ // SkuNamePremiumLRS ...
+ SkuNamePremiumLRS SkuName = "Premium_LRS"
+ // SkuNamePremiumZRS ...
+ SkuNamePremiumZRS SkuName = "Premium_ZRS"
+ // SkuNameStandardGRS ...
+ SkuNameStandardGRS SkuName = "Standard_GRS"
+ // SkuNameStandardGZRS ...
+ SkuNameStandardGZRS SkuName = "Standard_GZRS"
+ // SkuNameStandardLRS ...
+ SkuNameStandardLRS SkuName = "Standard_LRS"
+ // SkuNameStandardRAGRS ...
+ SkuNameStandardRAGRS SkuName = "Standard_RAGRS"
+ // SkuNameStandardRAGZRS ...
+ SkuNameStandardRAGZRS SkuName = "Standard_RAGZRS"
+ // SkuNameStandardZRS ...
+ SkuNameStandardZRS SkuName = "Standard_ZRS"
+)
+
+// PossibleSkuNameValues returns an array of possible values for the SkuName const type.
+func PossibleSkuNameValues() []SkuName {
+ return []SkuName{SkuNamePremiumLRS, SkuNamePremiumZRS, SkuNameStandardGRS, SkuNameStandardGZRS, SkuNameStandardLRS, SkuNameStandardRAGRS, SkuNameStandardRAGZRS, SkuNameStandardZRS}
+}
+
+// SkuTier enumerates the values for sku tier.
+type SkuTier string
+
+const (
+ // SkuTierPremium ...
+ SkuTierPremium SkuTier = "Premium"
+ // SkuTierStandard ...
+ SkuTierStandard SkuTier = "Standard"
+)
+
+// PossibleSkuTierValues returns an array of possible values for the SkuTier const type.
+func PossibleSkuTierValues() []SkuTier {
+ return []SkuTier{SkuTierPremium, SkuTierStandard}
+}
+
+// State enumerates the values for state.
+type State string
+
+const (
+ // StateDeprovisioning ...
+ StateDeprovisioning State = "Deprovisioning"
+ // StateFailed ...
+ StateFailed State = "Failed"
+ // StateNetworkSourceDeleted ...
+ StateNetworkSourceDeleted State = "NetworkSourceDeleted"
+ // StateProvisioning ...
+ StateProvisioning State = "Provisioning"
+ // StateSucceeded ...
+ StateSucceeded State = "Succeeded"
+)
+
+// PossibleStateValues returns an array of possible values for the State const type.
+func PossibleStateValues() []State {
+ return []State{StateDeprovisioning, StateFailed, StateNetworkSourceDeleted, StateProvisioning, StateSucceeded}
+}
+
+// UsageUnit enumerates the values for usage unit.
+type UsageUnit string
+
+const (
+ // UsageUnitBytes ...
+ UsageUnitBytes UsageUnit = "Bytes"
+ // UsageUnitBytesPerSecond ...
+ UsageUnitBytesPerSecond UsageUnit = "BytesPerSecond"
+ // UsageUnitCount ...
+ UsageUnitCount UsageUnit = "Count"
+ // UsageUnitCountsPerSecond ...
+ UsageUnitCountsPerSecond UsageUnit = "CountsPerSecond"
+ // UsageUnitPercent ...
+ UsageUnitPercent UsageUnit = "Percent"
+ // UsageUnitSeconds ...
+ UsageUnitSeconds UsageUnit = "Seconds"
+)
+
+// PossibleUsageUnitValues returns an array of possible values for the UsageUnit const type.
+func PossibleUsageUnitValues() []UsageUnit {
+ return []UsageUnit{UsageUnitBytes, UsageUnitBytesPerSecond, UsageUnitCount, UsageUnitCountsPerSecond, UsageUnitPercent, UsageUnitSeconds}
+}
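
Every enum in this generated package follows the same shape: a typed string, a const block of the accepted values, and a Possible<Type>Values helper returning them as a slice. A minimal sketch of how a caller might use that pattern (not part of this diff; isValidSkuName and the sample inputs are hypothetical):

    package main

    import (
        "fmt"

        "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
    )

    // isValidSkuName reports whether s is one of the SKU names the
    // 2021-09-01 storage API accepts, using the generated helper.
    func isValidSkuName(s string) bool {
        for _, v := range storage.PossibleSkuNameValues() {
            if string(v) == s {
                return true
            }
        }
        return false
    }

    func main() {
        fmt.Println(isValidSkuName("Standard_LRS")) // true
        fmt.Println(isValidSkuName("Standard_XYZ")) // false
    }

The same loop works against any of the enums above, since each exposes an identical helper.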
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/fileservices.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/fileservices.go
new file mode 100644
index 000000000000..57c977db71fa
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/fileservices.go
@@ -0,0 +1,323 @@
+package storage
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// FileServicesClient is the Azure Storage Management API.
+type FileServicesClient struct {
+ BaseClient
+}
+
+// NewFileServicesClient creates an instance of the FileServicesClient client.
+func NewFileServicesClient(subscriptionID string) FileServicesClient {
+ return NewFileServicesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewFileServicesClientWithBaseURI creates an instance of the FileServicesClient client using a custom endpoint. Use
+// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
+func NewFileServicesClientWithBaseURI(baseURI string, subscriptionID string) FileServicesClient {
+ return FileServicesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// GetServiceProperties gets the properties of file services in storage accounts, including CORS (Cross-Origin Resource
+// Sharing) rules.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client FileServicesClient) GetServiceProperties(ctx context.Context, resourceGroupName string, accountName string) (result FileServiceProperties, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileServicesClient.GetServiceProperties")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.FileServicesClient", "GetServiceProperties", err.Error())
+ }
+
+ req, err := client.GetServicePropertiesPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "GetServiceProperties", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetServicePropertiesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "GetServiceProperties", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetServicePropertiesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "GetServiceProperties", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// GetServicePropertiesPreparer prepares the GetServiceProperties request.
+func (client FileServicesClient) GetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "FileServicesName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetServicePropertiesSender sends the GetServiceProperties request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileServicesClient) GetServicePropertiesSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// GetServicePropertiesResponder handles the response to the GetServiceProperties request. The method always
+// closes the http.Response Body.
+func (client FileServicesClient) GetServicePropertiesResponder(resp *http.Response) (result FileServiceProperties, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List lists all file services in storage accounts.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client FileServicesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result FileServiceItems, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileServicesClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.FileServicesClient", "List", err.Error())
+ }
+
+ req, err := client.ListPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "List", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client FileServicesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileServicesClient) ListSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client FileServicesClient) ListResponder(resp *http.Response) (result FileServiceItems, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// SetServiceProperties sets the properties of file services in storage accounts, including CORS (Cross-Origin Resource
+// Sharing) rules.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// parameters - the properties of file services in storage accounts, including CORS (Cross-Origin Resource
+// Sharing) rules.
+func (client FileServicesClient) SetServiceProperties(ctx context.Context, resourceGroupName string, accountName string, parameters FileServiceProperties) (result FileServiceProperties, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileServicesClient.SetServiceProperties")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.FileServicePropertiesProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.FileServicePropertiesProperties.ShareDeleteRetentionPolicy", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.FileServicePropertiesProperties.ShareDeleteRetentionPolicy.Days", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.FileServicePropertiesProperties.ShareDeleteRetentionPolicy.Days", Name: validation.InclusiveMaximum, Rule: int64(365), Chain: nil},
+ {Target: "parameters.FileServicePropertiesProperties.ShareDeleteRetentionPolicy.Days", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil},
+ }},
+ }},
+ }}}}}); err != nil {
+ return result, validation.NewError("storage.FileServicesClient", "SetServiceProperties", err.Error())
+ }
+
+ req, err := client.SetServicePropertiesPreparer(ctx, resourceGroupName, accountName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "SetServiceProperties", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.SetServicePropertiesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "SetServiceProperties", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.SetServicePropertiesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "SetServiceProperties", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// SetServicePropertiesPreparer prepares the SetServiceProperties request.
+func (client FileServicesClient) SetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters FileServiceProperties) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "FileServicesName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ parameters.Sku = nil
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// SetServicePropertiesSender sends the SetServiceProperties request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileServicesClient) SetServicePropertiesSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// SetServicePropertiesResponder handles the response to the SetServiceProperties request. The method always
+// closes the http.Response Body.
+func (client FileServicesClient) SetServicePropertiesResponder(resp *http.Response) (result FileServiceProperties, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
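
A usage sketch for the client defined above (not part of this diff). It assumes credentials come from environment variables via go-autorest's auth package; the subscription, resource group, and account names are placeholders:

    package main

    import (
        "context"
        "fmt"
        "os"
        "time"

        "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
        "github.com/Azure/go-autorest/autorest/azure/auth"
    )

    func main() {
        client := storage.NewFileServicesClient(os.Getenv("AZURE_SUBSCRIPTION_ID"))

        // NewAuthorizerFromEnvironment reads AZURE_TENANT_ID, AZURE_CLIENT_ID,
        // AZURE_CLIENT_SECRET, etc.; any autorest.Authorizer works here.
        authorizer, err := auth.NewAuthorizerFromEnvironment()
        if err != nil {
            panic(err)
        }
        client.Authorizer = authorizer

        ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
        defer cancel()

        // Issues GET .../fileServices/default with api-version=2021-09-01.
        props, err := client.GetServiceProperties(ctx, "example-rg", "examplestorageacct")
        if err != nil {
            panic(err)
        }
        fmt.Println("HTTP status:", props.Response.Response.StatusCode)
    }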
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/fileshares.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/fileshares.go
new file mode 100644
index 000000000000..bd6cfa5eb1c2
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/fileshares.go
@@ -0,0 +1,824 @@
+package storage
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// FileSharesClient is the Azure Storage Management API.
+type FileSharesClient struct {
+ BaseClient
+}
+
+// NewFileSharesClient creates an instance of the FileSharesClient client.
+func NewFileSharesClient(subscriptionID string) FileSharesClient {
+ return NewFileSharesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewFileSharesClientWithBaseURI creates an instance of the FileSharesClient client using a custom endpoint. Use this
+// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
+func NewFileSharesClientWithBaseURI(baseURI string, subscriptionID string) FileSharesClient {
+ return FileSharesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Create creates a new share under the specified account, as described by the request body. The share resource includes
+// metadata and properties for that share. It does not include a list of the files contained by the share.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// shareName - the name of the file share within the specified storage account. File share names must be
+// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-)
+// character must be immediately preceded and followed by a letter or number.
+// fileShare - properties of the file share to create.
+// expand - optional, used to expand the properties within the share's properties. Valid values are: snapshots.
+// Should be passed as a string with delimiter ','
+func (client FileSharesClient) Create(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare, expand string) (result FileShare, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Create")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: shareName,
+ Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: fileShare,
+ Constraints: []validation.Constraint{{Target: "fileShare.FileShareProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "fileShare.FileShareProperties.ShareQuota", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "fileShare.FileShareProperties.ShareQuota", Name: validation.InclusiveMaximum, Rule: int64(102400), Chain: nil},
+ {Target: "fileShare.FileShareProperties.ShareQuota", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil},
+ }},
+ }}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.FileSharesClient", "Create", err.Error())
+ }
+
+ req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, shareName, fileShare, expand)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Create", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Create", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Create", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// CreatePreparer prepares the Create request.
+func (client FileSharesClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare, expand string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "shareName": autorest.Encode("path", shareName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(expand) > 0 {
+ queryParameters["$expand"] = autorest.Encode("query", expand)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters),
+ autorest.WithJSON(fileShare),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateSender sends the Create request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileSharesClient) CreateSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// CreateResponder handles the response to the Create request. The method always
+// closes the http.Response Body.
+func (client FileSharesClient) CreateResponder(resp *http.Response) (result FileShare, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
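
A hedged sketch of driving Create (not part of this diff). The names are placeholders, and ShareQuota is taken to be the generated *int32 field that the validation above constrains to 1..102400:

    package main

    import (
        "context"
        "fmt"
        "os"
        "time"

        "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
        "github.com/Azure/go-autorest/autorest/azure/auth"
        "github.com/Azure/go-autorest/autorest/to"
    )

    func main() {
        client := storage.NewFileSharesClient(os.Getenv("AZURE_SUBSCRIPTION_ID"))
        authorizer, err := auth.NewAuthorizerFromEnvironment()
        if err != nil {
            panic(err)
        }
        client.Authorizer = authorizer

        ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
        defer cancel()

        share := storage.FileShare{
            FileShareProperties: &storage.FileShareProperties{
                // Quota in GiB; the validation in Create enforces 1..102400.
                ShareQuota: to.Int32Ptr(100),
            },
        }
        // The last argument is the optional $expand parameter; empty means none.
        result, err := client.Create(ctx, "example-rg", "examplestorageacct", "example-share", share, "")
        if err != nil {
            panic(err)
        }
        fmt.Println("HTTP status:", result.Response.Response.StatusCode)
    }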
+
+// Delete deletes the specified share under its account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// shareName - the name of the file share within the specified storage account. File share names must be
+// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-)
+// character must be immediately preceded and followed by a letter or number.
+// xMsSnapshot - optional, used to delete a snapshot.
+// include - optional. Valid values are: snapshots, leased-snapshots, none. The default value is snapshots. For
+// 'snapshots', the file share is deleted including all of its file share snapshots. If the file share contains
+// leased-snapshots, the deletion fails. For 'leased-snapshots', the file share is deleted including all of its
+// file share snapshots (leased/unleased). For 'none', the file share is deleted if it has no share snapshots.
+// If the file share contains any snapshots (leased or unleased), the deletion fails.
+func (client FileSharesClient) Delete(ctx context.Context, resourceGroupName string, accountName string, shareName string, xMsSnapshot string, include string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: shareName,
+ Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.FileSharesClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, shareName, xMsSnapshot, include)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Delete", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client FileSharesClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string, xMsSnapshot string, include string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "shareName": autorest.Encode("path", shareName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(include) > 0 {
+ queryParameters["$include"] = autorest.Encode("query", include)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if len(xMsSnapshot) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("x-ms-snapshot", autorest.String(xMsSnapshot)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileSharesClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client FileSharesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
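+
+// Usage sketch (illustrative only, not part of the generated SDK): deleting a
+// share together with its snapshots. The authorizer and resource names are
+// assumptions for the example.
+//
+//   client := NewFileSharesClient("<subscription-id>")
+//   client.Authorizer = authorizer // configured elsewhere
+//   // include="snapshots" (the default) deletes the share and its snapshots;
+//   // "leased-snapshots" also removes leased ones, "none" fails if any exist.
+//   if _, err := client.Delete(ctx, "example-rg", "examplesa", "example-share", "", "snapshots"); err != nil {
+//       // handle error
+//   }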
+
+// Get gets properties of a specified share.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// shareName - the name of the file share within the specified storage account. File share names must be
+// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-)
+// character must be immediately preceded and followed by a letter or number.
+// expand - optional, used to expand the properties within the share's properties. Valid values are: stats. Should
+// be passed as a string with delimiter ','.
+// xMsSnapshot - optional, used to retrieve properties of a snapshot.
+func (client FileSharesClient) Get(ctx context.Context, resourceGroupName string, accountName string, shareName string, expand string, xMsSnapshot string) (result FileShare, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: shareName,
+ Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.FileSharesClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, accountName, shareName, expand, xMsSnapshot)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Get", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client FileSharesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string, expand string, xMsSnapshot string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "shareName": autorest.Encode("path", shareName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(expand) > 0 {
+ queryParameters["$expand"] = autorest.Encode("query", expand)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if len(xMsSnapshot) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("x-ms-snapshot", autorest.String(xMsSnapshot)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileSharesClient) GetSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client FileSharesClient) GetResponder(resp *http.Response) (result FileShare, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
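+
+// Usage sketch (illustrative only): reading share properties with the stats
+// expansion. The FileShareProperties field name is an assumption based on this
+// package's models.
+//
+//   share, err := client.Get(ctx, "example-rg", "examplesa", "example-share", "stats", "")
+//   if err == nil && share.FileShareProperties != nil {
+//       // inspect usage statistics on the expanded properties
+//   }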
+
+// Lease the Lease Share operation establishes and manages a lock on a share for delete operations. The lock duration
+// can be 15 to 60 seconds, or can be infinite.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// shareName - the name of the file share within the specified storage account. File share names must be
+// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-)
+// character must be immediately preceded and followed by a letter or number.
+// parameters - lease Share request body.
+// xMsSnapshot - optional. Specifies the snapshot time to lease a snapshot.
+func (client FileSharesClient) Lease(ctx context.Context, resourceGroupName string, accountName string, shareName string, parameters *LeaseShareRequest, xMsSnapshot string) (result LeaseShareResponse, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Lease")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: shareName,
+ Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.FileSharesClient", "Lease", err.Error())
+ }
+
+ req, err := client.LeasePreparer(ctx, resourceGroupName, accountName, shareName, parameters, xMsSnapshot)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Lease", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.LeaseSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Lease", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.LeaseResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Lease", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// LeasePreparer prepares the Lease request.
+func (client FileSharesClient) LeasePreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string, parameters *LeaseShareRequest, xMsSnapshot string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "shareName": autorest.Encode("path", shareName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}/lease", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if parameters != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithJSON(parameters))
+ }
+ if len(xMsSnapshot) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("x-ms-snapshot", autorest.String(xMsSnapshot)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// LeaseSender sends the Lease request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileSharesClient) LeaseSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// LeaseResponder handles the response to the Lease request. The method always
+// closes the http.Response Body.
+func (client FileSharesClient) LeaseResponder(resp *http.Response) (result LeaseShareResponse, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
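+
+// Usage sketch (illustrative only): acquiring a 60-second lease before a
+// delete. The LeaseShareRequest field and enum names below are assumptions to
+// be verified against this package's models.go.
+//
+//   req := &LeaseShareRequest{
+//       Action:        Acquire, // assumed LeaseShareAction constant
+//       LeaseDuration: to.Int32Ptr(60),
+//   }
+//   leaseResp, err := client.Lease(ctx, "example-rg", "examplesa", "example-share", req, "")
+//   // leaseResp.LeaseID (assumed field) identifies the lock on success.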
+
+// List lists all shares.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// maxpagesize - optional. Specifies the maximum number of shares that can be included in the list.
+// filter - optional. When specified, only share names starting with the filter will be listed.
+// expand - optional, used to expand the properties within the share's properties. Valid values are: deleted,
+// snapshots. Should be passed as a string with delimiter ','.
+func (client FileSharesClient) List(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string, expand string) (result FileShareItemsPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.List")
+ defer func() {
+ sc := -1
+ if result.fsi.Response.Response != nil {
+ sc = result.fsi.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.FileSharesClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, resourceGroupName, accountName, maxpagesize, filter, expand)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.fsi.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.fsi, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "List", resp, "Failure responding to request")
+ return
+ }
+ if result.fsi.hasNextLink() && result.fsi.IsEmpty() {
+ err = result.NextWithContext(ctx)
+ return
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client FileSharesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string, expand string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(maxpagesize) > 0 {
+ queryParameters["$maxpagesize"] = autorest.Encode("query", maxpagesize)
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+ if len(expand) > 0 {
+ queryParameters["$expand"] = autorest.Encode("query", expand)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileSharesClient) ListSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client FileSharesClient) ListResponder(resp *http.Response) (result FileShareItems, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client FileSharesClient) listNextResults(ctx context.Context, lastResults FileShareItems) (result FileShareItems, err error) {
+ req, err := lastResults.fileShareItemsPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "storage.FileSharesClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "storage.FileSharesClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client FileSharesClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string, expand string) (result FileShareItemsIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, resourceGroupName, accountName, maxpagesize, filter, expand)
+ return
+}
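+
+// Usage sketch (illustrative only): walking every share across page
+// boundaries via the iterator; NotDone/Value/NextWithContext follow the
+// standard generated-pager pattern.
+//
+//   iter, err := client.ListComplete(ctx, "example-rg", "examplesa", "", "", "")
+//   for err == nil && iter.NotDone() {
+//       item := iter.Value() // one FileShareItem per iteration
+//       _ = item
+//       err = iter.NextWithContext(ctx)
+//   }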
+
+// Restore restores a file share within valid retention days if share soft delete is enabled.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// shareName - the name of the file share within the specified storage account. File share names must be
+// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-)
+// character must be immediately preceded and followed by a letter or number.
+func (client FileSharesClient) Restore(ctx context.Context, resourceGroupName string, accountName string, shareName string, deletedShare DeletedShare) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Restore")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: shareName,
+ Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: deletedShare,
+ Constraints: []validation.Constraint{{Target: "deletedShare.DeletedShareName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "deletedShare.DeletedShareVersion", Name: validation.Null, Rule: true, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.FileSharesClient", "Restore", err.Error())
+ }
+
+ req, err := client.RestorePreparer(ctx, resourceGroupName, accountName, shareName, deletedShare)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Restore", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.RestoreSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Restore", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.RestoreResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Restore", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// RestorePreparer prepares the Restore request.
+func (client FileSharesClient) RestorePreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string, deletedShare DeletedShare) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "shareName": autorest.Encode("path", shareName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}/restore", pathParameters),
+ autorest.WithJSON(deletedShare),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// RestoreSender sends the Restore request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileSharesClient) RestoreSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// RestoreResponder handles the response to the Restore request. The method always
+// closes the http.Response Body.
+func (client FileSharesClient) RestoreResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
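+
+// Usage sketch (illustrative only): restoring a soft-deleted share. The
+// DeletedShareName and DeletedShareVersion fields are required, matching the
+// validation above; the version value here is a hypothetical placeholder
+// obtained from a List call with $expand=deleted.
+//
+//   deleted := DeletedShare{
+//       DeletedShareName:    to.StringPtr("example-share"),
+//       DeletedShareVersion: to.StringPtr("<deleted-share-version>"),
+//   }
+//   _, err := client.Restore(ctx, "example-rg", "examplesa", "example-share", deleted)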
+
+// Update updates share properties as specified in the request body. Properties not mentioned in the request will not be
+// changed. Update fails if the specified share does not already exist.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// shareName - the name of the file share within the specified storage account. File share names must be
+// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-)
+// character must be immediately preceded and followed by a letter or number.
+// fileShare - properties to update for the file share.
+func (client FileSharesClient) Update(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare) (result FileShare, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: shareName,
+ Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.FileSharesClient", "Update", err.Error())
+ }
+
+ req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, shareName, fileShare)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Update", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Update", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client FileSharesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "shareName": autorest.Encode("path", shareName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters),
+ autorest.WithJSON(fileShare),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileSharesClient) UpdateSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client FileSharesClient) UpdateResponder(resp *http.Response) (result FileShare, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
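+
+// Usage sketch (illustrative only): a PATCH-style update that changes only the
+// supplied properties. The FileShareProperties/ShareQuota field names are
+// assumptions based on this package's models.
+//
+//   updated := FileShare{
+//       FileShareProperties: &FileShareProperties{
+//           ShareQuota: to.Int32Ptr(200), // new quota; unspecified properties stay unchanged
+//       },
+//   }
+//   share, err := client.Update(ctx, "example-rg", "examplesa", "example-share", updated)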
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/localusers.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/localusers.go
new file mode 100644
index 000000000000..4552d1e63ece
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/localusers.go
@@ -0,0 +1,610 @@
+package storage
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// LocalUsersClient is the Azure Storage Management API.
+type LocalUsersClient struct {
+ BaseClient
+}
+
+// NewLocalUsersClient creates an instance of the LocalUsersClient client.
+func NewLocalUsersClient(subscriptionID string) LocalUsersClient {
+ return NewLocalUsersClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewLocalUsersClientWithBaseURI creates an instance of the LocalUsersClient client using a custom endpoint. Use this
+// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
+func NewLocalUsersClientWithBaseURI(baseURI string, subscriptionID string) LocalUsersClient {
+ return LocalUsersClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates the properties of a local user associated with the storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// username - the name of the local user. The username must contain lowercase letters and numbers only. It must be
+// unique only within the storage account.
+// properties - the local user associated with a storage account.
+func (client LocalUsersClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, username string, properties LocalUser) (result LocalUser, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/LocalUsersClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: username,
+ Constraints: []validation.Constraint{{Target: "username", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "username", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.LocalUsersClient", "CreateOrUpdate", err.Error())
+ }
+
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, username, properties)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.LocalUsersClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateOrUpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.LocalUsersClient", "CreateOrUpdate", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.LocalUsersClient", "CreateOrUpdate", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client LocalUsersClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, username string, properties LocalUser) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "username": autorest.Encode("path", username),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/localUsers/{username}", pathParameters),
+ autorest.WithJSON(properties),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client LocalUsersClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client LocalUsersClient) CreateOrUpdateResponder(resp *http.Response) (result LocalUser, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
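+
+// Usage sketch (illustrative only): creating a local user. An empty LocalUser
+// is passed for brevity; real callers would populate the model's properties
+// (SSH keys, permission scopes), whose field names are defined in models.go.
+//
+//   luClient := NewLocalUsersClient("<subscription-id>")
+//   luClient.Authorizer = authorizer // configured elsewhere
+//   user, err := luClient.CreateOrUpdate(ctx, "example-rg", "examplesa", "user1", LocalUser{})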
+
+// Delete deletes the local user associated with the specified storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// username - the name of the local user. The username must contain lowercase letters and numbers only. It must be
+// unique only within the storage account.
+func (client LocalUsersClient) Delete(ctx context.Context, resourceGroupName string, accountName string, username string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/LocalUsersClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: username,
+ Constraints: []validation.Constraint{{Target: "username", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "username", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.LocalUsersClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, username)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.LocalUsersClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "storage.LocalUsersClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.LocalUsersClient", "Delete", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client LocalUsersClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, username string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "username": autorest.Encode("path", username),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/localUsers/{username}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client LocalUsersClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client LocalUsersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets the local user of the storage account by username.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// username - the name of the local user. The username must contain lowercase letters and numbers only. It must be
+// unique only within the storage account.
+func (client LocalUsersClient) Get(ctx context.Context, resourceGroupName string, accountName string, username string) (result LocalUser, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/LocalUsersClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: username,
+ Constraints: []validation.Constraint{{Target: "username", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "username", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.LocalUsersClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, accountName, username)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.LocalUsersClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.LocalUsersClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.LocalUsersClient", "Get", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client LocalUsersClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, username string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "username": autorest.Encode("path", username),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/localUsers/{username}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client LocalUsersClient) GetSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client LocalUsersClient) GetResponder(resp *http.Response) (result LocalUser, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List lists the local users associated with the storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client LocalUsersClient) List(ctx context.Context, resourceGroupName string, accountName string) (result LocalUsers, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/LocalUsersClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.LocalUsersClient", "List", err.Error())
+ }
+
+ req, err := client.ListPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.LocalUsersClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.LocalUsersClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.LocalUsersClient", "List", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client LocalUsersClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/localUsers", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client LocalUsersClient) ListSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client LocalUsersClient) ListResponder(resp *http.Response) (result LocalUsers, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
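+
+// Usage sketch (illustrative only). Unlike FileSharesClient.List, this
+// operation returns the whole LocalUsers collection in a single response, so
+// there is no pager or iterator.
+//
+//   users, err := luClient.List(ctx, "example-rg", "examplesa")
+//   // users.Value (assumed field) holds the []LocalUser on success.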
+
+// ListKeys lists the SSH authorized keys and shared key of the local user.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// username - the name of the local user. The username must contain lowercase letters and numbers only. It must be
+// unique only within the storage account.
+func (client LocalUsersClient) ListKeys(ctx context.Context, resourceGroupName string, accountName string, username string) (result LocalUserKeys, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/LocalUsersClient.ListKeys")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: username,
+ Constraints: []validation.Constraint{{Target: "username", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "username", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.LocalUsersClient", "ListKeys", err.Error())
+ }
+
+ req, err := client.ListKeysPreparer(ctx, resourceGroupName, accountName, username)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.LocalUsersClient", "ListKeys", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListKeysSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.LocalUsersClient", "ListKeys", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListKeysResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.LocalUsersClient", "ListKeys", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// ListKeysPreparer prepares the ListKeys request.
+func (client LocalUsersClient) ListKeysPreparer(ctx context.Context, resourceGroupName string, accountName string, username string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "username": autorest.Encode("path", username),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/localUsers/{username}/listKeys", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListKeysSender sends the ListKeys request. The method will close the
+// http.Response Body if it receives an error.
+func (client LocalUsersClient) ListKeysSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListKeysResponder handles the response to the ListKeys request. The method always
+// closes the http.Response Body.
+func (client LocalUsersClient) ListKeysResponder(resp *http.Response) (result LocalUserKeys, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
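+
+// Usage sketch (illustrative only): ListKeys is a POST rather than a GET
+// because it returns secrets. The result field names are assumptions; see
+// LocalUserKeys in models.go.
+//
+//   keys, err := luClient.ListKeys(ctx, "example-rg", "examplesa", "user1")
+//   // keys.SSHAuthorizedKeys / keys.SharedKey (assumed fields) carry the secrets.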
+
+// RegeneratePassword regenerates the local user SSH password.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// username - the name of the local user. The username must contain lowercase letters and numbers only. It must be
+// unique only within the storage account.
+func (client LocalUsersClient) RegeneratePassword(ctx context.Context, resourceGroupName string, accountName string, username string) (result LocalUserRegeneratePasswordResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/LocalUsersClient.RegeneratePassword")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: username,
+ Constraints: []validation.Constraint{{Target: "username", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "username", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.LocalUsersClient", "RegeneratePassword", err.Error())
+ }
+
+ req, err := client.RegeneratePasswordPreparer(ctx, resourceGroupName, accountName, username)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.LocalUsersClient", "RegeneratePassword", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.RegeneratePasswordSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.LocalUsersClient", "RegeneratePassword", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.RegeneratePasswordResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.LocalUsersClient", "RegeneratePassword", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// RegeneratePasswordPreparer prepares the RegeneratePassword request.
+func (client LocalUsersClient) RegeneratePasswordPreparer(ctx context.Context, resourceGroupName string, accountName string, username string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "username": autorest.Encode("path", username),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/localUsers/{username}/regeneratePassword", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// RegeneratePasswordSender sends the RegeneratePassword request. The method will close the
+// http.Response Body if it receives an error.
+func (client LocalUsersClient) RegeneratePasswordSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// RegeneratePasswordResponder handles the response to the RegeneratePassword request. The method always
+// closes the http.Response Body.
+func (client LocalUsersClient) RegeneratePasswordResponder(resp *http.Response) (result LocalUserRegeneratePasswordResult, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
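+
+// Usage sketch (illustrative only): rotating a local user's SSH password. The
+// new password is returned in the result of this call; the field name below is
+// an assumption based on the result type.
+//
+//   pw, err := luClient.RegeneratePassword(ctx, "example-rg", "examplesa", "user1")
+//   // pw.SSHPassword (assumed field) holds the newly generated password.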
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/managementpolicies.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/managementpolicies.go
new file mode 100644
index 000000000000..c55da446fd68
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/managementpolicies.go
@@ -0,0 +1,316 @@
+package storage
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// ManagementPoliciesClient is the Azure Storage Management API.
+type ManagementPoliciesClient struct {
+ BaseClient
+}
+
+// NewManagementPoliciesClient creates an instance of the ManagementPoliciesClient client.
+func NewManagementPoliciesClient(subscriptionID string) ManagementPoliciesClient {
+ return NewManagementPoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewManagementPoliciesClientWithBaseURI creates an instance of the ManagementPoliciesClient client using a custom
+// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure
+// stack).
+func NewManagementPoliciesClientWithBaseURI(baseURI string, subscriptionID string) ManagementPoliciesClient {
+ return ManagementPoliciesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate sets the management policy of the specified storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// properties - the ManagementPolicy set to a storage account.
+func (client ManagementPoliciesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, properties ManagementPolicy) (result ManagementPolicy, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagementPoliciesClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: properties,
+ Constraints: []validation.Constraint{{Target: "properties.ManagementPolicyProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "properties.ManagementPolicyProperties.Policy", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "properties.ManagementPolicyProperties.Policy.Rules", Name: validation.Null, Rule: true, Chain: nil}}},
+ }}}}}); err != nil {
+ return result, validation.NewError("storage.ManagementPoliciesClient", "CreateOrUpdate", err.Error())
+ }
+
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, properties)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateOrUpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "CreateOrUpdate", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "CreateOrUpdate", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client ManagementPoliciesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, properties ManagementPolicy) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "managementPolicyName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}", pathParameters),
+ autorest.WithJSON(properties),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagementPoliciesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client ManagementPoliciesClient) CreateOrUpdateResponder(resp *http.Response) (result ManagementPolicy, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes the management policy associated with the specified storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client ManagementPoliciesClient) Delete(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagementPoliciesClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.ManagementPoliciesClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Delete", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client ManagementPoliciesClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "managementPolicyName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagementPoliciesClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client ManagementPoliciesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets the management policy associated with the specified storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client ManagementPoliciesClient) Get(ctx context.Context, resourceGroupName string, accountName string) (result ManagementPolicy, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagementPoliciesClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.ManagementPoliciesClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Get", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client ManagementPoliciesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "managementPolicyName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagementPoliciesClient) GetSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ManagementPoliciesClient) GetResponder(resp *http.Response) (result ManagementPolicy, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
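
A caller exercising the new ManagementPoliciesClient might look like this sketch. The identifiers are placeholders; the `ManagementPolicyProperties` and `Policy.Rules` fields referenced are the same ones the CreateOrUpdate validation block above requires, and the "default" policy name is hard-coded by the Preparers.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	const (
		subscriptionID = "00000000-0000-0000-0000-000000000000" // placeholder
		resourceGroup  = "example-rg"                           // placeholder
		accountName    = "examplestorageacct"                   // placeholder
	)

	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatalf("building authorizer: %v", err)
	}

	client := storage.NewManagementPoliciesClient(subscriptionID)
	client.Authorizer = authorizer
	ctx := context.Background()

	// Get targets the singleton "default" policy; the Preparer hard-codes
	// managementPolicyName to "default" in the URL path.
	policy, err := client.Get(ctx, resourceGroup, accountName)
	if err != nil {
		log.Fatalf("fetching management policy: %v", err)
	}
	if policy.ManagementPolicyProperties != nil && policy.Policy != nil && policy.Policy.Rules != nil {
		fmt.Printf("account has %d lifecycle rule(s)\n", len(*policy.Policy.Rules))
	}

	// Delete removes the policy; the Responder accepts 200 or 204.
	if _, err := client.Delete(ctx, resourceGroup, accountName); err != nil {
		log.Fatalf("deleting management policy: %v", err)
	}
}
```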
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/models.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/models.go
new file mode 100644
index 000000000000..69058ef272dd
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/models.go
@@ -0,0 +1,5743 @@
+package storage
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "encoding/json"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/Azure/go-autorest/autorest/to"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// The package's fully qualified name.
+const fqdn = "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
+
+// AccessPolicy ...
+type AccessPolicy struct {
+ // StartTime - Start time of the access policy
+ StartTime *date.Time `json:"startTime,omitempty"`
+ // ExpiryTime - Expiry time of the access policy
+ ExpiryTime *date.Time `json:"expiryTime,omitempty"`
+ // Permission - List of abbreviated permissions.
+ Permission *string `json:"permission,omitempty"`
+}
+
+// Account the storage account.
+type Account struct {
+ autorest.Response `json:"-"`
+ // Sku - READ-ONLY; Gets the SKU.
+ Sku *Sku `json:"sku,omitempty"`
+ // Kind - READ-ONLY; Gets the Kind. Possible values include: 'KindStorage', 'KindStorageV2', 'KindBlobStorage', 'KindFileStorage', 'KindBlockBlobStorage'
+ Kind Kind `json:"kind,omitempty"`
+ // Identity - The identity of the resource.
+ Identity *Identity `json:"identity,omitempty"`
+ // ExtendedLocation - The extendedLocation of the resource.
+ ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"`
+ // AccountProperties - Properties of the storage account.
+ *AccountProperties `json:"properties,omitempty"`
+ // Tags - Resource tags.
+ Tags map[string]*string `json:"tags"`
+ // Location - The geo-location where the resource lives
+ Location *string `json:"location,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for Account.
+func (a Account) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if a.Identity != nil {
+ objectMap["identity"] = a.Identity
+ }
+ if a.ExtendedLocation != nil {
+ objectMap["extendedLocation"] = a.ExtendedLocation
+ }
+ if a.AccountProperties != nil {
+ objectMap["properties"] = a.AccountProperties
+ }
+ if a.Tags != nil {
+ objectMap["tags"] = a.Tags
+ }
+ if a.Location != nil {
+ objectMap["location"] = a.Location
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for Account struct.
+func (a *Account) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "sku":
+ if v != nil {
+ var sku Sku
+ err = json.Unmarshal(*v, &sku)
+ if err != nil {
+ return err
+ }
+ a.Sku = &sku
+ }
+ case "kind":
+ if v != nil {
+ var kind Kind
+ err = json.Unmarshal(*v, &kind)
+ if err != nil {
+ return err
+ }
+ a.Kind = kind
+ }
+ case "identity":
+ if v != nil {
+ var identity Identity
+ err = json.Unmarshal(*v, &identity)
+ if err != nil {
+ return err
+ }
+ a.Identity = &identity
+ }
+ case "extendedLocation":
+ if v != nil {
+ var extendedLocation ExtendedLocation
+ err = json.Unmarshal(*v, &extendedLocation)
+ if err != nil {
+ return err
+ }
+ a.ExtendedLocation = &extendedLocation
+ }
+ case "properties":
+ if v != nil {
+ var accountProperties AccountProperties
+ err = json.Unmarshal(*v, &accountProperties)
+ if err != nil {
+ return err
+ }
+ a.AccountProperties = &accountProperties
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ a.Tags = tags
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ a.Location = &location
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ a.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ a.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ a.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
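
Note the asymmetry between the custom marshaler and unmarshaler above: UnmarshalJSON populates the READ-ONLY fields (Sku, Kind, ID, Name, Type), while MarshalJSON drops them so they are never echoed back to the service. A small round trip illustrates this; the payload here is fabricated for the example.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Simulated service response, including READ-ONLY fields.
	payload := []byte(`{"name":"examplestorageacct","sku":{"name":"Standard_LRS"},"location":"eastus","tags":{"env":"dev"}}`)

	var acct storage.Account
	if err := json.Unmarshal(payload, &acct); err != nil {
		log.Fatal(err)
	}
	// READ-ONLY fields are populated on decode.
	fmt.Println("decoded name:", to.String(acct.Name))

	// Re-encoding drops READ-ONLY fields: only identity, extendedLocation,
	// properties, tags and location survive MarshalJSON.
	out, err := json.Marshal(acct)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out)) // {"location":"eastus","tags":{"env":"dev"}}
}
```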
+
+// AccountCheckNameAvailabilityParameters the parameters used to check the availability of the storage
+// account name.
+type AccountCheckNameAvailabilityParameters struct {
+ // Name - The storage account name.
+ Name *string `json:"name,omitempty"`
+ // Type - The type of resource, Microsoft.Storage/storageAccounts
+ Type *string `json:"type,omitempty"`
+}
+
+// AccountCreateParameters the parameters used when creating a storage account.
+type AccountCreateParameters struct {
+ // Sku - Required. Gets or sets the SKU name.
+ Sku *Sku `json:"sku,omitempty"`
+ // Kind - Required. Indicates the type of storage account. Possible values include: 'KindStorage', 'KindStorageV2', 'KindBlobStorage', 'KindFileStorage', 'KindBlockBlobStorage'
+ Kind Kind `json:"kind,omitempty"`
+ // Location - Required. Gets or sets the location of the resource. This will be one of the supported and registered Azure Geo Regions (e.g. West US, East US, Southeast Asia, etc.). The geo region of a resource cannot be changed once it is created, but if an identical geo region is specified on update, the request will succeed.
+ Location *string `json:"location,omitempty"`
+ // ExtendedLocation - Optional. Set the extended location of the resource. If not set, the storage account will be created in Azure main region. Otherwise it will be created in the specified extended location
+ ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"`
+ // Tags - Gets or sets a list of key value pairs that describe the resource. These tags can be used for viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key with a length no greater than 128 characters and a value with a length no greater than 256 characters.
+ Tags map[string]*string `json:"tags"`
+ // Identity - The identity of the resource.
+ Identity *Identity `json:"identity,omitempty"`
+ // AccountPropertiesCreateParameters - The parameters used to create the storage account.
+ *AccountPropertiesCreateParameters `json:"properties,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AccountCreateParameters.
+func (acp AccountCreateParameters) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if acp.Sku != nil {
+ objectMap["sku"] = acp.Sku
+ }
+ if acp.Kind != "" {
+ objectMap["kind"] = acp.Kind
+ }
+ if acp.Location != nil {
+ objectMap["location"] = acp.Location
+ }
+ if acp.ExtendedLocation != nil {
+ objectMap["extendedLocation"] = acp.ExtendedLocation
+ }
+ if acp.Tags != nil {
+ objectMap["tags"] = acp.Tags
+ }
+ if acp.Identity != nil {
+ objectMap["identity"] = acp.Identity
+ }
+ if acp.AccountPropertiesCreateParameters != nil {
+ objectMap["properties"] = acp.AccountPropertiesCreateParameters
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for AccountCreateParameters struct.
+func (acp *AccountCreateParameters) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "sku":
+ if v != nil {
+ var sku Sku
+ err = json.Unmarshal(*v, &sku)
+ if err != nil {
+ return err
+ }
+ acp.Sku = &sku
+ }
+ case "kind":
+ if v != nil {
+ var kind Kind
+ err = json.Unmarshal(*v, &kind)
+ if err != nil {
+ return err
+ }
+ acp.Kind = kind
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ acp.Location = &location
+ }
+ case "extendedLocation":
+ if v != nil {
+ var extendedLocation ExtendedLocation
+ err = json.Unmarshal(*v, &extendedLocation)
+ if err != nil {
+ return err
+ }
+ acp.ExtendedLocation = &extendedLocation
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ acp.Tags = tags
+ }
+ case "identity":
+ if v != nil {
+ var identity Identity
+ err = json.Unmarshal(*v, &identity)
+ if err != nil {
+ return err
+ }
+ acp.Identity = &identity
+ }
+ case "properties":
+ if v != nil {
+ var accountPropertiesCreateParameters AccountPropertiesCreateParameters
+ err = json.Unmarshal(*v, &accountPropertiesCreateParameters)
+ if err != nil {
+ return err
+ }
+ acp.AccountPropertiesCreateParameters = &accountPropertiesCreateParameters
+ }
+ }
+ }
+
+ return nil
+}
+
+// AccountImmutabilityPolicyProperties this defines account-level immutability policy properties.
+type AccountImmutabilityPolicyProperties struct {
+ // ImmutabilityPeriodSinceCreationInDays - The immutability period for the blobs in the container since the policy creation, in days.
+ ImmutabilityPeriodSinceCreationInDays *int32 `json:"immutabilityPeriodSinceCreationInDays,omitempty"`
+ // State - The ImmutabilityPolicy state defines the mode of the policy. Disabled state disables the policy, Unlocked state allows increase and decrease of immutability retention time and also allows toggling allowProtectedAppendWrites property, Locked state only allows the increase of the immutability retention time. A policy can only be created in a Disabled or Unlocked state and can be toggled between the two states. Only a policy in an Unlocked state can transition to a Locked state which cannot be reverted. Possible values include: 'AccountImmutabilityPolicyStateUnlocked', 'AccountImmutabilityPolicyStateLocked', 'AccountImmutabilityPolicyStateDisabled'
+ State AccountImmutabilityPolicyState `json:"state,omitempty"`
+ // AllowProtectedAppendWrites - This property can only be changed for disabled and unlocked time-based retention policies. When enabled, new blocks can be written to an append blob while maintaining immutability protection and compliance. Only new blocks can be added and any existing blocks cannot be modified or deleted.
+ AllowProtectedAppendWrites *bool `json:"allowProtectedAppendWrites,omitempty"`
+}
+
+// AccountInternetEndpoints the URIs that are used to perform a retrieval of a public blob, file, web or
+// dfs object via an internet routing endpoint.
+type AccountInternetEndpoints struct {
+ // Blob - READ-ONLY; Gets the blob endpoint.
+ Blob *string `json:"blob,omitempty"`
+ // File - READ-ONLY; Gets the file endpoint.
+ File *string `json:"file,omitempty"`
+ // Web - READ-ONLY; Gets the web endpoint.
+ Web *string `json:"web,omitempty"`
+ // Dfs - READ-ONLY; Gets the dfs endpoint.
+ Dfs *string `json:"dfs,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AccountInternetEndpoints.
+func (aie AccountInternetEndpoints) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// AccountKey an access key for the storage account.
+type AccountKey struct {
+ // KeyName - READ-ONLY; Name of the key.
+ KeyName *string `json:"keyName,omitempty"`
+ // Value - READ-ONLY; Base 64-encoded value of the key.
+ Value *string `json:"value,omitempty"`
+ // Permissions - READ-ONLY; Permissions for the key -- read-only or full permissions. Possible values include: 'KeyPermissionRead', 'KeyPermissionFull'
+ Permissions KeyPermission `json:"permissions,omitempty"`
+ // CreationTime - READ-ONLY; Creation time of the key, in round trip date format.
+ CreationTime *date.Time `json:"creationTime,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AccountKey.
+func (ak AccountKey) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// AccountListKeysResult the response from the ListKeys operation.
+type AccountListKeysResult struct {
+ autorest.Response `json:"-"`
+ // Keys - READ-ONLY; Gets the list of storage account keys and their properties for the specified storage account.
+ Keys *[]AccountKey `json:"keys,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AccountListKeysResult.
+func (alkr AccountListKeysResult) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// AccountListResult the response from the List Storage Accounts operation.
+type AccountListResult struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; Gets the list of storage accounts and their properties.
+ Value *[]Account `json:"value,omitempty"`
+ // NextLink - READ-ONLY; Request URL that can be used to query the next page of storage accounts. Returned when the total number of requested storage accounts exceeds the maximum page size.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AccountListResult.
+func (alr AccountListResult) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// AccountListResultIterator provides access to a complete listing of Account values.
+type AccountListResultIterator struct {
+ i int
+ page AccountListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *AccountListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *AccountListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter AccountListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter AccountListResultIterator) Response() AccountListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter AccountListResultIterator) Value() Account {
+ if !iter.page.NotDone() {
+ return Account{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the AccountListResultIterator type.
+func NewAccountListResultIterator(page AccountListResultPage) AccountListResultIterator {
+ return AccountListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (alr AccountListResult) IsEmpty() bool {
+ return alr.Value == nil || len(*alr.Value) == 0
+}
+
+// hasNextLink returns true if the NextLink is not empty.
+func (alr AccountListResult) hasNextLink() bool {
+ return alr.NextLink != nil && len(*alr.NextLink) != 0
+}
+
+// accountListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (alr AccountListResult) accountListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if !alr.hasNextLink() {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(alr.NextLink)))
+}
+
+// AccountListResultPage contains a page of Account values.
+type AccountListResultPage struct {
+ fn func(context.Context, AccountListResult) (AccountListResult, error)
+ alr AccountListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *AccountListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ for {
+ next, err := page.fn(ctx, page.alr)
+ if err != nil {
+ return err
+ }
+ page.alr = next
+ if !next.hasNextLink() || !next.IsEmpty() {
+ break
+ }
+ }
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *AccountListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page AccountListResultPage) NotDone() bool {
+ return !page.alr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page AccountListResultPage) Response() AccountListResult {
+ return page.alr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page AccountListResultPage) Values() []Account {
+ if page.alr.IsEmpty() {
+ return nil
+ }
+ return *page.alr.Value
+}
+
+// Creates a new instance of the AccountListResultPage type.
+func NewAccountListResultPage(cur AccountListResult, getNextPage func(context.Context, AccountListResult) (AccountListResult, error)) AccountListResultPage {
+ return AccountListResultPage{
+ fn: getNextPage,
+ alr: cur,
+ }
+}
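
The IsEmpty/hasNextLink/page/iterator machinery above is the generated pagination surface shared by every list operation in the package. Assuming the usual `AccountsClient.List` entry point from accounts.go (not part of this hunk), page-by-page enumeration looks roughly like:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
	"github.com/Azure/go-autorest/autorest/azure/auth"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	client := storage.NewAccountsClient("00000000-0000-0000-0000-000000000000") // placeholder
	client.Authorizer = authorizer
	ctx := context.Background()

	page, err := client.List(ctx) // returns an AccountListResultPage
	if err != nil {
		log.Fatal(err)
	}
	for page.NotDone() {
		for _, acct := range page.Values() {
			fmt.Println(to.String(acct.Name))
		}
		// NextWithContext follows nextLink until hasNextLink() is false.
		if err := page.NextWithContext(ctx); err != nil {
			log.Fatal(err)
		}
	}
}
```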
+
+// AccountMicrosoftEndpoints the URIs that are used to perform a retrieval of a public blob, queue, table,
+// web or dfs object via a microsoft routing endpoint.
+type AccountMicrosoftEndpoints struct {
+ // Blob - READ-ONLY; Gets the blob endpoint.
+ Blob *string `json:"blob,omitempty"`
+ // Queue - READ-ONLY; Gets the queue endpoint.
+ Queue *string `json:"queue,omitempty"`
+ // Table - READ-ONLY; Gets the table endpoint.
+ Table *string `json:"table,omitempty"`
+ // File - READ-ONLY; Gets the file endpoint.
+ File *string `json:"file,omitempty"`
+ // Web - READ-ONLY; Gets the web endpoint.
+ Web *string `json:"web,omitempty"`
+ // Dfs - READ-ONLY; Gets the dfs endpoint.
+ Dfs *string `json:"dfs,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AccountMicrosoftEndpoints.
+func (ame AccountMicrosoftEndpoints) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// AccountProperties properties of the storage account.
+type AccountProperties struct {
+ // ProvisioningState - READ-ONLY; Gets the status of the storage account at the time the operation was called. Possible values include: 'ProvisioningStateCreating', 'ProvisioningStateResolvingDNS', 'ProvisioningStateSucceeded'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+ // PrimaryEndpoints - READ-ONLY; Gets the URLs that are used to perform a retrieval of a public blob, queue, or table object. Note that Standard_ZRS and Premium_LRS accounts only return the blob endpoint.
+ PrimaryEndpoints *Endpoints `json:"primaryEndpoints,omitempty"`
+ // PrimaryLocation - READ-ONLY; Gets the location of the primary data center for the storage account.
+ PrimaryLocation *string `json:"primaryLocation,omitempty"`
+ // StatusOfPrimary - READ-ONLY; Gets the status indicating whether the primary location of the storage account is available or unavailable. Possible values include: 'AccountStatusAvailable', 'AccountStatusUnavailable'
+ StatusOfPrimary AccountStatus `json:"statusOfPrimary,omitempty"`
+ // LastGeoFailoverTime - READ-ONLY; Gets the timestamp of the most recent instance of a failover to the secondary location. Only the most recent timestamp is retained. This element is not returned if there has never been a failover instance. Only available if the accountType is Standard_GRS or Standard_RAGRS.
+ LastGeoFailoverTime *date.Time `json:"lastGeoFailoverTime,omitempty"`
+ // SecondaryLocation - READ-ONLY; Gets the location of the geo-replicated secondary for the storage account. Only available if the accountType is Standard_GRS or Standard_RAGRS.
+ SecondaryLocation *string `json:"secondaryLocation,omitempty"`
+ // StatusOfSecondary - READ-ONLY; Gets the status indicating whether the secondary location of the storage account is available or unavailable. Only available if the SKU name is Standard_GRS or Standard_RAGRS. Possible values include: 'AccountStatusAvailable', 'AccountStatusUnavailable'
+ StatusOfSecondary AccountStatus `json:"statusOfSecondary,omitempty"`
+ // CreationTime - READ-ONLY; Gets the creation date and time of the storage account in UTC.
+ CreationTime *date.Time `json:"creationTime,omitempty"`
+ // CustomDomain - READ-ONLY; Gets the custom domain the user assigned to this storage account.
+ CustomDomain *CustomDomain `json:"customDomain,omitempty"`
+ // SasPolicy - READ-ONLY; SasPolicy assigned to the storage account.
+ SasPolicy *SasPolicy `json:"sasPolicy,omitempty"`
+ // KeyPolicy - READ-ONLY; KeyPolicy assigned to the storage account.
+ KeyPolicy *KeyPolicy `json:"keyPolicy,omitempty"`
+ // KeyCreationTime - READ-ONLY; Storage account keys creation time.
+ KeyCreationTime *KeyCreationTime `json:"keyCreationTime,omitempty"`
+ // SecondaryEndpoints - READ-ONLY; Gets the URLs that are used to perform a retrieval of a public blob, queue, or table object from the secondary location of the storage account. Only available if the SKU name is Standard_RAGRS.
+ SecondaryEndpoints *Endpoints `json:"secondaryEndpoints,omitempty"`
+ // Encryption - READ-ONLY; Encryption settings to be used for server-side encryption for the storage account.
+ Encryption *Encryption `json:"encryption,omitempty"`
+ // AccessTier - READ-ONLY; Required for storage accounts where kind = BlobStorage. The access tier is used for billing. The 'Premium' access tier is the default value for premium block blobs storage account type and it cannot be changed for the premium block blobs storage account type. Possible values include: 'AccessTierHot', 'AccessTierCool', 'AccessTierPremium'
+ AccessTier AccessTier `json:"accessTier,omitempty"`
+ // AzureFilesIdentityBasedAuthentication - Provides the identity based authentication settings for Azure Files.
+ AzureFilesIdentityBasedAuthentication *AzureFilesIdentityBasedAuthentication `json:"azureFilesIdentityBasedAuthentication,omitempty"`
+ // EnableHTTPSTrafficOnly - Allows https traffic only to storage service if sets to true.
+ EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"`
+ // NetworkRuleSet - READ-ONLY; Network rule set
+ NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"`
+ // IsSftpEnabled - Enables Secure File Transfer Protocol, if set to true
+ IsSftpEnabled *bool `json:"isSftpEnabled,omitempty"`
+ // IsLocalUserEnabled - Enables local users feature, if set to true
+ IsLocalUserEnabled *bool `json:"isLocalUserEnabled,omitempty"`
+ // IsHnsEnabled - Account HierarchicalNamespace enabled if sets to true.
+ IsHnsEnabled *bool `json:"isHnsEnabled,omitempty"`
+ // GeoReplicationStats - READ-ONLY; Geo Replication Stats
+ GeoReplicationStats *GeoReplicationStats `json:"geoReplicationStats,omitempty"`
+ // FailoverInProgress - READ-ONLY; If the failover is in progress, the value will be true, otherwise, it will be null.
+ FailoverInProgress *bool `json:"failoverInProgress,omitempty"`
+ // LargeFileSharesState - Allow large file shares if sets to Enabled. It cannot be disabled once it is enabled. Possible values include: 'LargeFileSharesStateDisabled', 'LargeFileSharesStateEnabled'
+ LargeFileSharesState LargeFileSharesState `json:"largeFileSharesState,omitempty"`
+ // PrivateEndpointConnections - READ-ONLY; List of private endpoint connection associated with the specified storage account
+ PrivateEndpointConnections *[]PrivateEndpointConnection `json:"privateEndpointConnections,omitempty"`
+ // RoutingPreference - Maintains information about the network routing choice opted by the user for data transfer
+ RoutingPreference *RoutingPreference `json:"routingPreference,omitempty"`
+ // BlobRestoreStatus - READ-ONLY; Blob restore status
+ BlobRestoreStatus *BlobRestoreStatus `json:"blobRestoreStatus,omitempty"`
+ // AllowBlobPublicAccess - Allow or disallow public access to all blobs or containers in the storage account. The default interpretation is true for this property.
+ AllowBlobPublicAccess *bool `json:"allowBlobPublicAccess,omitempty"`
+ // MinimumTLSVersion - Set the minimum TLS version to be permitted on requests to storage. The default interpretation is TLS 1.0 for this property. Possible values include: 'MinimumTLSVersionTLS10', 'MinimumTLSVersionTLS11', 'MinimumTLSVersionTLS12'
+ MinimumTLSVersion MinimumTLSVersion `json:"minimumTlsVersion,omitempty"`
+ // AllowSharedKeyAccess - Indicates whether the storage account permits requests to be authorized with the account access key via Shared Key. If false, then all requests, including shared access signatures, must be authorized with Azure Active Directory (Azure AD). The default value is null, which is equivalent to true.
+ AllowSharedKeyAccess *bool `json:"allowSharedKeyAccess,omitempty"`
+ // EnableNfsV3 - NFS 3.0 protocol support enabled if set to true.
+ EnableNfsV3 *bool `json:"isNfsV3Enabled,omitempty"`
+ // AllowCrossTenantReplication - Allow or disallow cross AAD tenant object replication. The default interpretation is true for this property.
+ AllowCrossTenantReplication *bool `json:"allowCrossTenantReplication,omitempty"`
+ // DefaultToOAuthAuthentication - A boolean flag which indicates whether the default authentication is OAuth or not. The default interpretation is false for this property.
+ DefaultToOAuthAuthentication *bool `json:"defaultToOAuthAuthentication,omitempty"`
+ // PublicNetworkAccess - Allow or disallow public network access to Storage Account. Value is optional but if passed in, must be 'Enabled' or 'Disabled'. Possible values include: 'PublicNetworkAccessEnabled', 'PublicNetworkAccessDisabled'
+ PublicNetworkAccess PublicNetworkAccess `json:"publicNetworkAccess,omitempty"`
+ // ImmutableStorageWithVersioning - The property is immutable and can only be set to true at the account creation time. When set to true, it enables object level immutability for all the containers in the account by default.
+ ImmutableStorageWithVersioning *ImmutableStorageAccount `json:"immutableStorageWithVersioning,omitempty"`
+ // AllowedCopyScope - Restrict copy to and from Storage Accounts within an AAD tenant or with Private Links to the same VNet. Possible values include: 'AllowedCopyScopePrivateLink', 'AllowedCopyScopeAAD'
+ AllowedCopyScope AllowedCopyScope `json:"allowedCopyScope,omitempty"`
+ // StorageAccountSkuConversionStatus - This property is readOnly and is set by server during asynchronous storage account sku conversion operations.
+ StorageAccountSkuConversionStatus *AccountSkuConversionStatus `json:"storageAccountSkuConversionStatus,omitempty"`
+ // DNSEndpointType - Allows you to specify the type of endpoint. Set this to AzureDNSZone to create a large number of accounts in a single subscription, which creates accounts in an Azure DNS Zone and the endpoint URL will have an alphanumeric DNS Zone identifier. Possible values include: 'DNSEndpointTypeStandard', 'DNSEndpointTypeAzureDNSZone'
+ DNSEndpointType DNSEndpointType `json:"dnsEndpointType,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AccountProperties.
+func (ap AccountProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if ap.AzureFilesIdentityBasedAuthentication != nil {
+ objectMap["azureFilesIdentityBasedAuthentication"] = ap.AzureFilesIdentityBasedAuthentication
+ }
+ if ap.EnableHTTPSTrafficOnly != nil {
+ objectMap["supportsHttpsTrafficOnly"] = ap.EnableHTTPSTrafficOnly
+ }
+ if ap.IsSftpEnabled != nil {
+ objectMap["isSftpEnabled"] = ap.IsSftpEnabled
+ }
+ if ap.IsLocalUserEnabled != nil {
+ objectMap["isLocalUserEnabled"] = ap.IsLocalUserEnabled
+ }
+ if ap.IsHnsEnabled != nil {
+ objectMap["isHnsEnabled"] = ap.IsHnsEnabled
+ }
+ if ap.LargeFileSharesState != "" {
+ objectMap["largeFileSharesState"] = ap.LargeFileSharesState
+ }
+ if ap.RoutingPreference != nil {
+ objectMap["routingPreference"] = ap.RoutingPreference
+ }
+ if ap.AllowBlobPublicAccess != nil {
+ objectMap["allowBlobPublicAccess"] = ap.AllowBlobPublicAccess
+ }
+ if ap.MinimumTLSVersion != "" {
+ objectMap["minimumTlsVersion"] = ap.MinimumTLSVersion
+ }
+ if ap.AllowSharedKeyAccess != nil {
+ objectMap["allowSharedKeyAccess"] = ap.AllowSharedKeyAccess
+ }
+ if ap.EnableNfsV3 != nil {
+ objectMap["isNfsV3Enabled"] = ap.EnableNfsV3
+ }
+ if ap.AllowCrossTenantReplication != nil {
+ objectMap["allowCrossTenantReplication"] = ap.AllowCrossTenantReplication
+ }
+ if ap.DefaultToOAuthAuthentication != nil {
+ objectMap["defaultToOAuthAuthentication"] = ap.DefaultToOAuthAuthentication
+ }
+ if ap.PublicNetworkAccess != "" {
+ objectMap["publicNetworkAccess"] = ap.PublicNetworkAccess
+ }
+ if ap.ImmutableStorageWithVersioning != nil {
+ objectMap["immutableStorageWithVersioning"] = ap.ImmutableStorageWithVersioning
+ }
+ if ap.AllowedCopyScope != "" {
+ objectMap["allowedCopyScope"] = ap.AllowedCopyScope
+ }
+ if ap.StorageAccountSkuConversionStatus != nil {
+ objectMap["storageAccountSkuConversionStatus"] = ap.StorageAccountSkuConversionStatus
+ }
+ if ap.DNSEndpointType != "" {
+ objectMap["dnsEndpointType"] = ap.DNSEndpointType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AccountPropertiesCreateParameters the parameters used to create the storage account.
+type AccountPropertiesCreateParameters struct {
+ // AllowedCopyScope - Restrict copy to and from Storage Accounts within an AAD tenant or with Private Links to the same VNet. Possible values include: 'AllowedCopyScopePrivateLink', 'AllowedCopyScopeAAD'
+ AllowedCopyScope AllowedCopyScope `json:"allowedCopyScope,omitempty"`
+ // PublicNetworkAccess - Allow or disallow public network access to Storage Account. Value is optional but if passed in, must be 'Enabled' or 'Disabled'. Possible values include: 'PublicNetworkAccessEnabled', 'PublicNetworkAccessDisabled'
+ PublicNetworkAccess PublicNetworkAccess `json:"publicNetworkAccess,omitempty"`
+ // SasPolicy - SasPolicy assigned to the storage account.
+ SasPolicy *SasPolicy `json:"sasPolicy,omitempty"`
+ // KeyPolicy - KeyPolicy assigned to the storage account.
+ KeyPolicy *KeyPolicy `json:"keyPolicy,omitempty"`
+ // CustomDomain - User domain assigned to the storage account. Name is the CNAME source. Only one custom domain is supported per storage account at this time. To clear the existing custom domain, use an empty string for the custom domain name property.
+ CustomDomain *CustomDomain `json:"customDomain,omitempty"`
+ // Encryption - Encryption settings to be used for server-side encryption for the storage account.
+ Encryption *Encryption `json:"encryption,omitempty"`
+ // NetworkRuleSet - Network rule set
+ NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"`
+ // AccessTier - Required for storage accounts where kind = BlobStorage. The access tier is used for billing. The 'Premium' access tier is the default value for premium block blobs storage account type and it cannot be changed for the premium block blobs storage account type. Possible values include: 'AccessTierHot', 'AccessTierCool', 'AccessTierPremium'
+ AccessTier AccessTier `json:"accessTier,omitempty"`
+ // AzureFilesIdentityBasedAuthentication - Provides the identity based authentication settings for Azure Files.
+ AzureFilesIdentityBasedAuthentication *AzureFilesIdentityBasedAuthentication `json:"azureFilesIdentityBasedAuthentication,omitempty"`
+ // EnableHTTPSTrafficOnly - Allows https traffic only to storage service if sets to true. The default value is true since API version 2019-04-01.
+ EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"`
+ // IsSftpEnabled - Enables Secure File Transfer Protocol, if set to true
+ IsSftpEnabled *bool `json:"isSftpEnabled,omitempty"`
+ // IsLocalUserEnabled - Enables local users feature, if set to true
+ IsLocalUserEnabled *bool `json:"isLocalUserEnabled,omitempty"`
+ // IsHnsEnabled - Account HierarchicalNamespace enabled if sets to true.
+ IsHnsEnabled *bool `json:"isHnsEnabled,omitempty"`
+ // LargeFileSharesState - Allow large file shares if sets to Enabled. It cannot be disabled once it is enabled. Possible values include: 'LargeFileSharesStateDisabled', 'LargeFileSharesStateEnabled'
+ LargeFileSharesState LargeFileSharesState `json:"largeFileSharesState,omitempty"`
+ // RoutingPreference - Maintains information about the network routing choice opted by the user for data transfer
+ RoutingPreference *RoutingPreference `json:"routingPreference,omitempty"`
+ // AllowBlobPublicAccess - Allow or disallow public access to all blobs or containers in the storage account. The default interpretation is true for this property.
+ AllowBlobPublicAccess *bool `json:"allowBlobPublicAccess,omitempty"`
+ // MinimumTLSVersion - Set the minimum TLS version to be permitted on requests to storage. The default interpretation is TLS 1.0 for this property. Possible values include: 'MinimumTLSVersionTLS10', 'MinimumTLSVersionTLS11', 'MinimumTLSVersionTLS12'
+ MinimumTLSVersion MinimumTLSVersion `json:"minimumTlsVersion,omitempty"`
+ // AllowSharedKeyAccess - Indicates whether the storage account permits requests to be authorized with the account access key via Shared Key. If false, then all requests, including shared access signatures, must be authorized with Azure Active Directory (Azure AD). The default value is null, which is equivalent to true.
+ AllowSharedKeyAccess *bool `json:"allowSharedKeyAccess,omitempty"`
+ // EnableNfsV3 - NFS 3.0 protocol support enabled if set to true.
+ EnableNfsV3 *bool `json:"isNfsV3Enabled,omitempty"`
+ // AllowCrossTenantReplication - Allow or disallow cross AAD tenant object replication. The default interpretation is true for this property.
+ AllowCrossTenantReplication *bool `json:"allowCrossTenantReplication,omitempty"`
+ // DefaultToOAuthAuthentication - A boolean flag which indicates whether the default authentication is OAuth or not. The default interpretation is false for this property.
+ DefaultToOAuthAuthentication *bool `json:"defaultToOAuthAuthentication,omitempty"`
+ // ImmutableStorageWithVersioning - The property is immutable and can only be set to true at the account creation time. When set to true, it enables object level immutability for all the new containers in the account by default.
+ ImmutableStorageWithVersioning *ImmutableStorageAccount `json:"immutableStorageWithVersioning,omitempty"`
+ // DNSEndpointType - Allows you to specify the type of endpoint. Set this to AzureDNSZone to create a large number of accounts in a single subscription, which creates accounts in an Azure DNS Zone and the endpoint URL will have an alphanumeric DNS Zone identifier. Possible values include: 'DNSEndpointTypeStandard', 'DNSEndpointTypeAzureDNSZone'
+ DNSEndpointType DNSEndpointType `json:"dnsEndpointType,omitempty"`
+}
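
As a sketch of how these create parameters compose: the embedded AccountPropertiesCreateParameters is serialized under "properties" while Sku, Kind, and Location stay top-level (see the MarshalJSON for AccountCreateParameters above). The enum constants (e.g. SkuNameStandardLRS) and the long-running `AccountsClient.Create` method live outside this hunk, so treat those names as assumptions.

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
	"github.com/Azure/go-autorest/autorest/azure/auth"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client := storage.NewAccountsClient("00000000-0000-0000-0000-000000000000") // placeholder
	client.Authorizer = authorizer
	ctx := context.Background()

	params := storage.AccountCreateParameters{
		Sku:      &storage.Sku{Name: storage.SkuNameStandardLRS}, // assumed enum constant
		Kind:     storage.KindStorageV2,
		Location: to.StringPtr("eastus"),
		// Everything below lands in the request body's "properties" object.
		AccountPropertiesCreateParameters: &storage.AccountPropertiesCreateParameters{
			EnableHTTPSTrafficOnly: to.BoolPtr(true),
			MinimumTLSVersion:      storage.MinimumTLSVersionTLS12,
			AllowBlobPublicAccess:  to.BoolPtr(false),
		},
	}

	// Create is a long-running operation defined in accounts.go (not shown);
	// its future is waited on like any other generated LRO.
	future, err := client.Create(ctx, "example-rg", "examplestorageacct", params)
	if err != nil {
		log.Fatal(err)
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		log.Fatal(err)
	}
}
```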
+
+// AccountPropertiesUpdateParameters the parameters used when updating a storage account.
+type AccountPropertiesUpdateParameters struct {
+ // CustomDomain - Custom domain assigned to the storage account by the user. Name is the CNAME source. Only one custom domain is supported per storage account at this time. To clear the existing custom domain, use an empty string for the custom domain name property.
+ CustomDomain *CustomDomain `json:"customDomain,omitempty"`
+ // Encryption - Not applicable. Azure Storage encryption at rest is enabled by default for all storage accounts and cannot be disabled.
+ Encryption *Encryption `json:"encryption,omitempty"`
+ // SasPolicy - SasPolicy assigned to the storage account.
+ SasPolicy *SasPolicy `json:"sasPolicy,omitempty"`
+ // KeyPolicy - KeyPolicy assigned to the storage account.
+ KeyPolicy *KeyPolicy `json:"keyPolicy,omitempty"`
+ // AccessTier - Required for storage accounts where kind = BlobStorage. The access tier is used for billing. The 'Premium' access tier is the default value for premium block blobs storage account type and it cannot be changed for the premium block blobs storage account type. Possible values include: 'AccessTierHot', 'AccessTierCool', 'AccessTierPremium'
+ AccessTier AccessTier `json:"accessTier,omitempty"`
+ // AzureFilesIdentityBasedAuthentication - Provides the identity based authentication settings for Azure Files.
+ AzureFilesIdentityBasedAuthentication *AzureFilesIdentityBasedAuthentication `json:"azureFilesIdentityBasedAuthentication,omitempty"`
+ // EnableHTTPSTrafficOnly - Allows https traffic only to storage service if sets to true.
+ EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"`
+ // IsSftpEnabled - Enables Secure File Transfer Protocol, if set to true
+ IsSftpEnabled *bool `json:"isSftpEnabled,omitempty"`
+ // IsLocalUserEnabled - Enables local users feature, if set to true
+ IsLocalUserEnabled *bool `json:"isLocalUserEnabled,omitempty"`
+ // NetworkRuleSet - Network rule set
+ NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"`
+ // LargeFileSharesState - Allow large file shares if sets to Enabled. It cannot be disabled once it is enabled. Possible values include: 'LargeFileSharesStateDisabled', 'LargeFileSharesStateEnabled'
+ LargeFileSharesState LargeFileSharesState `json:"largeFileSharesState,omitempty"`
+ // RoutingPreference - Maintains information about the network routing choice opted by the user for data transfer
+ RoutingPreference *RoutingPreference `json:"routingPreference,omitempty"`
+ // AllowBlobPublicAccess - Allow or disallow public access to all blobs or containers in the storage account. The default interpretation is true for this property.
+ AllowBlobPublicAccess *bool `json:"allowBlobPublicAccess,omitempty"`
+ // MinimumTLSVersion - Set the minimum TLS version to be permitted on requests to storage. The default interpretation is TLS 1.0 for this property. Possible values include: 'MinimumTLSVersionTLS10', 'MinimumTLSVersionTLS11', 'MinimumTLSVersionTLS12'
+ MinimumTLSVersion MinimumTLSVersion `json:"minimumTlsVersion,omitempty"`
+ // AllowSharedKeyAccess - Indicates whether the storage account permits requests to be authorized with the account access key via Shared Key. If false, then all requests, including shared access signatures, must be authorized with Azure Active Directory (Azure AD). The default value is null, which is equivalent to true.
+ AllowSharedKeyAccess *bool `json:"allowSharedKeyAccess,omitempty"`
+ // AllowCrossTenantReplication - Allow or disallow cross AAD tenant object replication. The default interpretation is true for this property.
+ AllowCrossTenantReplication *bool `json:"allowCrossTenantReplication,omitempty"`
+ // DefaultToOAuthAuthentication - A boolean flag which indicates whether the default authentication is OAuth or not. The default interpretation is false for this property.
+ DefaultToOAuthAuthentication *bool `json:"defaultToOAuthAuthentication,omitempty"`
+ // PublicNetworkAccess - Allow or disallow public network access to Storage Account. Value is optional but if passed in, must be 'Enabled' or 'Disabled'. Possible values include: 'PublicNetworkAccessEnabled', 'PublicNetworkAccessDisabled'
+ PublicNetworkAccess PublicNetworkAccess `json:"publicNetworkAccess,omitempty"`
+ // ImmutableStorageWithVersioning - The property is immutable and can only be set to true at the account creation time. When set to true, it enables object level immutability for all the containers in the account by default.
+ ImmutableStorageWithVersioning *ImmutableStorageAccount `json:"immutableStorageWithVersioning,omitempty"`
+ // AllowedCopyScope - Restrict copy to and from Storage Accounts within an AAD tenant or with Private Links to the same VNet. Possible values include: 'AllowedCopyScopePrivateLink', 'AllowedCopyScopeAAD'
+ AllowedCopyScope AllowedCopyScope `json:"allowedCopyScope,omitempty"`
+ // DNSEndpointType - Allows you to specify the type of endpoint. Set this to AzureDNSZone to create a large number of accounts in a single subscription, which creates accounts in an Azure DNS Zone and the endpoint URL will have an alphanumeric DNS Zone identifier. Possible values include: 'DNSEndpointTypeStandard', 'DNSEndpointTypeAzureDNSZone'
+ DNSEndpointType DNSEndpointType `json:"dnsEndpointType,omitempty"`
+}
+
+// AccountRegenerateKeyParameters the parameters used to regenerate the storage account key.
+type AccountRegenerateKeyParameters struct {
+ // KeyName - The name of storage keys that want to be regenerated, possible values are key1, key2, kerb1, kerb2.
+ KeyName *string `json:"keyName,omitempty"`
+}
+
+// AccountsAbortHierarchicalNamespaceMigrationFuture an abstraction for monitoring and retrieving the
+// results of a long-running operation.
+type AccountsAbortHierarchicalNamespaceMigrationFuture struct {
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(AccountsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for AccountsAbortHierarchicalNamespaceMigrationFuture.
+func (future *AccountsAbortHierarchicalNamespaceMigrationFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for AccountsAbortHierarchicalNamespaceMigrationFuture.Result.
+func (future *AccountsAbortHierarchicalNamespaceMigrationFuture) result(client AccountsClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsAbortHierarchicalNamespaceMigrationFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ ar.Response = future.Response()
+ err = azure.NewAsyncOpIncompleteError("storage.AccountsAbortHierarchicalNamespaceMigrationFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
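+
+// Callers normally do not invoke result directly; the usual flow is to poll
+// the future to completion and then read Result. A minimal sketch, assuming a
+// configured AccountsClient named client and a future returned by the
+// corresponding client method:
+//
+//   if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+//     return err
+//   }
+//   resp, err := future.Result(client)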
+
+// AccountSasParameters the parameters to list SAS credentials of a storage account.
+type AccountSasParameters struct {
+ // Services - The signed services accessible with the account SAS. Possible values include: Blob (b), Queue (q), Table (t), File (f). Possible values include: 'ServicesB', 'ServicesQ', 'ServicesT', 'ServicesF'
+ Services Services `json:"signedServices,omitempty"`
+ // ResourceTypes - The signed resource types that are accessible with the account SAS. Service (s): Access to service-level APIs; Container (c): Access to container-level APIs; Object (o): Access to object-level APIs for blobs, queue messages, table entities, and files. Possible values include: 'SignedResourceTypesS', 'SignedResourceTypesC', 'SignedResourceTypesO'
+ ResourceTypes SignedResourceTypes `json:"signedResourceTypes,omitempty"`
+ // Permissions - The signed permissions for the account SAS. Possible values include: Read (r), Write (w), Delete (d), List (l), Add (a), Create (c), Update (u) and Process (p). Possible values include: 'PermissionsR', 'PermissionsD', 'PermissionsW', 'PermissionsL', 'PermissionsA', 'PermissionsC', 'PermissionsU', 'PermissionsP'
+ Permissions Permissions `json:"signedPermission,omitempty"`
+ // IPAddressOrRange - An IP address or a range of IP addresses from which to accept requests.
+ IPAddressOrRange *string `json:"signedIp,omitempty"`
+ // Protocols - The protocol permitted for a request made with the account SAS. Possible values include: 'HTTPProtocolHttpshttp', 'HTTPProtocolHTTPS'
+ Protocols HTTPProtocol `json:"signedProtocol,omitempty"`
+ // SharedAccessStartTime - The time at which the SAS becomes valid.
+ SharedAccessStartTime *date.Time `json:"signedStart,omitempty"`
+ // SharedAccessExpiryTime - The time at which the shared access signature becomes invalid.
+ SharedAccessExpiryTime *date.Time `json:"signedExpiry,omitempty"`
+ // KeyToSign - The key to sign the account SAS token with.
+ KeyToSign *string `json:"keyToSign,omitempty"`
+}
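+
+// A minimal sketch of building account-SAS parameters for read-only blob
+// access over HTTPS, assuming the enum constants referenced in the field
+// comments above (ServicesB, SignedResourceTypesO, PermissionsR,
+// HTTPProtocolHTTPS) and the standard library time package:
+//
+//   expiry := date.Time{Time: time.Now().Add(time.Hour)}
+//   params := AccountSasParameters{
+//     Services:               ServicesB,
+//     ResourceTypes:          SignedResourceTypesO,
+//     Permissions:            PermissionsR,
+//     Protocols:              HTTPProtocolHTTPS,
+//     SharedAccessExpiryTime: &expiry,
+//   }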
+
+// AccountsCreateFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type AccountsCreateFuture struct {
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(AccountsClient) (Account, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for AccountsCreateFuture.
+func (future *AccountsCreateFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for AccountsCreateFuture.Result.
+func (future *AccountsCreateFuture) result(client AccountsClient) (a Account, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsCreateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ a.Response.Response = future.Response()
+ err = azure.NewAsyncOpIncompleteError("storage.AccountsCreateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if a.Response.Response, err = future.GetResult(sender); err == nil && a.Response.Response.StatusCode != http.StatusNoContent {
+ a, err = client.CreateResponder(a.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsCreateFuture", "Result", a.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
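+
+// Unlike the no-body futures above, result here must fetch and decode the
+// final Account payload, so it decorates the sender with the client's retry
+// policy before calling GetResult. A sketch of end-to-end usage, assuming the
+// AccountsClient.Create method generated elsewhere in this package:
+//
+//   future, err := client.Create(ctx, resourceGroup, accountName, createParams)
+//   if err != nil {
+//     return err
+//   }
+//   account, err := future.Result(client)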
+
+// AccountsFailoverFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type AccountsFailoverFuture struct {
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(AccountsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for AccountsFailoverFuture.
+func (future *AccountsFailoverFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for AccountsFailoverFuture.Result.
+func (future *AccountsFailoverFuture) result(client AccountsClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsFailoverFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ ar.Response = future.Response()
+ err = azure.NewAsyncOpIncompleteError("storage.AccountsFailoverFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// AccountsHierarchicalNamespaceMigrationFuture an abstraction for monitoring and retrieving the results of
+// a long-running operation.
+type AccountsHierarchicalNamespaceMigrationFuture struct {
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(AccountsClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for AccountsHierarchicalNamespaceMigrationFuture.
+func (future *AccountsHierarchicalNamespaceMigrationFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for AccountsHierarchicalNamespaceMigrationFuture.Result.
+func (future *AccountsHierarchicalNamespaceMigrationFuture) result(client AccountsClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsHierarchicalNamespaceMigrationFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ ar.Response = future.Response()
+ err = azure.NewAsyncOpIncompleteError("storage.AccountsHierarchicalNamespaceMigrationFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// AccountSkuConversionStatus this defines the sku conversion status object for asynchronous sku
+// conversions.
+type AccountSkuConversionStatus struct {
+ // SkuConversionStatus - READ-ONLY; This property indicates the current sku conversion status. Possible values include: 'SkuConversionStatusInProgress', 'SkuConversionStatusSucceeded', 'SkuConversionStatusFailed'
+ SkuConversionStatus SkuConversionStatus `json:"skuConversionStatus,omitempty"`
+ // TargetSkuName - This property represents the target sku name to which the account sku is being converted asynchronously. Possible values include: 'SkuNameStandardLRS', 'SkuNameStandardGRS', 'SkuNameStandardRAGRS', 'SkuNameStandardZRS', 'SkuNamePremiumLRS', 'SkuNamePremiumZRS', 'SkuNameStandardGZRS', 'SkuNameStandardRAGZRS'
+ TargetSkuName SkuName `json:"targetSkuName,omitempty"`
+ // StartTime - READ-ONLY; This property represents the sku conversion start time.
+ StartTime *string `json:"startTime,omitempty"`
+ // EndTime - READ-ONLY; This property represents the sku conversion end time.
+ EndTime *string `json:"endTime,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AccountSkuConversionStatus.
+func (ascs AccountSkuConversionStatus) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if ascs.TargetSkuName != "" {
+ objectMap["targetSkuName"] = ascs.TargetSkuName
+ }
+ return json.Marshal(objectMap)
+}
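+
+// The custom marshaler above follows the package-wide convention for
+// READ-ONLY fields: only writable properties (here targetSkuName) are
+// emitted, so server-populated status and timestamps are never echoed back.
+// For example (a sketch):
+//
+//   b, _ := json.Marshal(AccountSkuConversionStatus{TargetSkuName: SkuNameStandardLRS})
+//   // b == []byte(`{"targetSkuName":"Standard_LRS"}`)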
+
+// AccountsRestoreBlobRangesFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type AccountsRestoreBlobRangesFuture struct {
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(AccountsClient) (BlobRestoreStatus, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for AccountsRestoreBlobRangesFuture.
+func (future *AccountsRestoreBlobRangesFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for AccountsRestoreBlobRangesFuture.Result.
+func (future *AccountsRestoreBlobRangesFuture) result(client AccountsClient) (brs BlobRestoreStatus, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsRestoreBlobRangesFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ brs.Response.Response = future.Response()
+ err = azure.NewAsyncOpIncompleteError("storage.AccountsRestoreBlobRangesFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if brs.Response.Response, err = future.GetResult(sender); err == nil && brs.Response.Response.StatusCode != http.StatusNoContent {
+ brs, err = client.RestoreBlobRangesResponder(brs.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsRestoreBlobRangesFuture", "Result", brs.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
+
+// AccountUpdateParameters the parameters that can be provided when updating the storage account
+// properties.
+type AccountUpdateParameters struct {
+ // Sku - Gets or sets the SKU name. Note that the SKU name cannot be updated to Standard_ZRS, Premium_LRS or Premium_ZRS, nor can accounts of those SKU names be updated to any other value.
+ Sku *Sku `json:"sku,omitempty"`
+ // Tags - Gets or sets a list of key value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater in length than 128 characters and a value no greater in length than 256 characters.
+ Tags map[string]*string `json:"tags"`
+ // Identity - The identity of the resource.
+ Identity *Identity `json:"identity,omitempty"`
+ // AccountPropertiesUpdateParameters - The parameters used when updating a storage account.
+ *AccountPropertiesUpdateParameters `json:"properties,omitempty"`
+ // Kind - Optional. Indicates the type of storage account. Currently only the StorageV2 value is supported by the server. Possible values include: 'KindStorage', 'KindStorageV2', 'KindBlobStorage', 'KindFileStorage', 'KindBlockBlobStorage'
+ Kind Kind `json:"kind,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AccountUpdateParameters.
+func (aup AccountUpdateParameters) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if aup.Sku != nil {
+ objectMap["sku"] = aup.Sku
+ }
+ if aup.Tags != nil {
+ objectMap["tags"] = aup.Tags
+ }
+ if aup.Identity != nil {
+ objectMap["identity"] = aup.Identity
+ }
+ if aup.AccountPropertiesUpdateParameters != nil {
+ objectMap["properties"] = aup.AccountPropertiesUpdateParameters
+ }
+ if aup.Kind != "" {
+ objectMap["kind"] = aup.Kind
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for AccountUpdateParameters struct.
+func (aup *AccountUpdateParameters) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "sku":
+ if v != nil {
+ var sku Sku
+ err = json.Unmarshal(*v, &sku)
+ if err != nil {
+ return err
+ }
+ aup.Sku = &sku
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ aup.Tags = tags
+ }
+ case "identity":
+ if v != nil {
+ var identity Identity
+ err = json.Unmarshal(*v, &identity)
+ if err != nil {
+ return err
+ }
+ aup.Identity = &identity
+ }
+ case "properties":
+ if v != nil {
+ var accountPropertiesUpdateParameters AccountPropertiesUpdateParameters
+ err = json.Unmarshal(*v, &accountPropertiesUpdateParameters)
+ if err != nil {
+ return err
+ }
+ aup.AccountPropertiesUpdateParameters = &accountPropertiesUpdateParameters
+ }
+ case "kind":
+ if v != nil {
+ var kind Kind
+ err = json.Unmarshal(*v, &kind)
+ if err != nil {
+ return err
+ }
+ aup.Kind = kind
+ }
+ }
+ }
+
+ return nil
+}
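+
+// The marshal/unmarshal pair above flattens the embedded
+// AccountPropertiesUpdateParameters into and out of the wire-level
+// "properties" envelope. A sketch of the round trip, using the
+// allowBlobPublicAccess property defined earlier in this file:
+//
+//   var aup AccountUpdateParameters
+//   _ = json.Unmarshal([]byte(`{"properties":{"allowBlobPublicAccess":false}}`), &aup)
+//   // aup.AllowBlobPublicAccess is now set via the embedded struct.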
+
+// ActiveDirectoryProperties settings properties for Active Directory (AD).
+type ActiveDirectoryProperties struct {
+ // DomainName - Specifies the primary domain that the AD DNS server is authoritative for.
+ DomainName *string `json:"domainName,omitempty"`
+ // NetBiosDomainName - Specifies the NetBIOS domain name.
+ NetBiosDomainName *string `json:"netBiosDomainName,omitempty"`
+ // ForestName - Specifies the Active Directory forest to get.
+ ForestName *string `json:"forestName,omitempty"`
+ // DomainGUID - Specifies the domain GUID.
+ DomainGUID *string `json:"domainGuid,omitempty"`
+ // DomainSid - Specifies the security identifier (SID).
+ DomainSid *string `json:"domainSid,omitempty"`
+ // AzureStorageSid - Specifies the security identifier (SID) for Azure Storage.
+ AzureStorageSid *string `json:"azureStorageSid,omitempty"`
+ // SamAccountName - Specifies the Active Directory SAMAccountName for Azure Storage.
+ SamAccountName *string `json:"samAccountName,omitempty"`
+ // AccountType - Specifies the Active Directory account type for Azure Storage. Possible values include: 'AccountTypeUser', 'AccountTypeComputer'
+ AccountType AccountType `json:"accountType,omitempty"`
+}
+
+// AzureEntityResource the resource model definition for an Azure Resource Manager resource with an etag.
+type AzureEntityResource struct {
+ // Etag - READ-ONLY; Resource Etag.
+ Etag *string `json:"etag,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AzureEntityResource.
+func (aer AzureEntityResource) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// AzureFilesIdentityBasedAuthentication settings for Azure Files identity based authentication.
+type AzureFilesIdentityBasedAuthentication struct {
+ // DirectoryServiceOptions - Indicates the directory service used. Possible values include: 'DirectoryServiceOptionsNone', 'DirectoryServiceOptionsAADDS', 'DirectoryServiceOptionsAD'
+ DirectoryServiceOptions DirectoryServiceOptions `json:"directoryServiceOptions,omitempty"`
+ // ActiveDirectoryProperties - Required if directoryServiceOptions is set to AD.
+ ActiveDirectoryProperties *ActiveDirectoryProperties `json:"activeDirectoryProperties,omitempty"`
+ // DefaultSharePermission - Default share permission for users using Kerberos authentication if RBAC role is not assigned. Possible values include: 'DefaultSharePermissionNone', 'DefaultSharePermissionStorageFileDataSmbShareReader', 'DefaultSharePermissionStorageFileDataSmbShareContributor', 'DefaultSharePermissionStorageFileDataSmbShareElevatedContributor'
+ DefaultSharePermission DefaultSharePermission `json:"defaultSharePermission,omitempty"`
+}
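+
+// A minimal sketch of enabling AD DS authentication for Azure Files; the
+// domain name and GUID below are placeholders, not real identifiers:
+//
+//   auth := AzureFilesIdentityBasedAuthentication{
+//     DirectoryServiceOptions: DirectoryServiceOptionsAD,
+//     ActiveDirectoryProperties: &ActiveDirectoryProperties{
+//       DomainName: to.StringPtr("corp.example.com"),
+//       DomainGUID: to.StringPtr("00000000-0000-0000-0000-000000000000"),
+//     },
+//   }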
+
+// BlobContainer properties of the blob container, including Id, resource name, resource type, Etag.
+type BlobContainer struct {
+ autorest.Response `json:"-"`
+ // ContainerProperties - Properties of the blob container.
+ *ContainerProperties `json:"properties,omitempty"`
+ // Etag - READ-ONLY; Resource Etag.
+ Etag *string `json:"etag,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for BlobContainer.
+func (bc BlobContainer) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if bc.ContainerProperties != nil {
+ objectMap["properties"] = bc.ContainerProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for BlobContainer struct.
+func (bc *BlobContainer) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var containerProperties ContainerProperties
+ err = json.Unmarshal(*v, &containerProperties)
+ if err != nil {
+ return err
+ }
+ bc.ContainerProperties = &containerProperties
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ bc.Etag = &etag
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ bc.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ bc.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ bc.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// BlobContainersObjectLevelWormFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type BlobContainersObjectLevelWormFuture struct {
+ azure.FutureAPI
+ // Result returns the result of the asynchronous operation.
+ // If the operation has not completed it will return an error.
+ Result func(BlobContainersClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for BlobContainersObjectLevelWormFuture.
+func (future *BlobContainersObjectLevelWormFuture) UnmarshalJSON(body []byte) error {
+ var azFuture azure.Future
+ if err := json.Unmarshal(body, &azFuture); err != nil {
+ return err
+ }
+ future.FutureAPI = &azFuture
+ future.Result = future.result
+ return nil
+}
+
+// result is the default implementation for BlobContainersObjectLevelWormFuture.Result.
+func (future *BlobContainersObjectLevelWormFuture) result(client BlobContainersClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersObjectLevelWormFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ ar.Response = future.Response()
+ err = azure.NewAsyncOpIncompleteError("storage.BlobContainersObjectLevelWormFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// BlobInventoryPolicy the storage account blob inventory policy.
+type BlobInventoryPolicy struct {
+ autorest.Response `json:"-"`
+ // BlobInventoryPolicyProperties - Returns the storage account blob inventory policy rules.
+ *BlobInventoryPolicyProperties `json:"properties,omitempty"`
+ SystemData *SystemData `json:"systemData,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for BlobInventoryPolicy.
+func (bip BlobInventoryPolicy) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if bip.BlobInventoryPolicyProperties != nil {
+ objectMap["properties"] = bip.BlobInventoryPolicyProperties
+ }
+ if bip.SystemData != nil {
+ objectMap["systemData"] = bip.SystemData
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for BlobInventoryPolicy struct.
+func (bip *BlobInventoryPolicy) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var blobInventoryPolicyProperties BlobInventoryPolicyProperties
+ err = json.Unmarshal(*v, &blobInventoryPolicyProperties)
+ if err != nil {
+ return err
+ }
+ bip.BlobInventoryPolicyProperties = &blobInventoryPolicyProperties
+ }
+ case "systemData":
+ if v != nil {
+ var systemData SystemData
+ err = json.Unmarshal(*v, &systemData)
+ if err != nil {
+ return err
+ }
+ bip.SystemData = &systemData
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ bip.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ bip.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ bip.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// BlobInventoryPolicyDefinition an object that defines the blob inventory rule.
+type BlobInventoryPolicyDefinition struct {
+ // Filters - An object that defines the filter set.
+ Filters *BlobInventoryPolicyFilter `json:"filters,omitempty"`
+ // Format - This is a required field; it specifies the format for the inventory files. Possible values include: 'FormatCsv', 'FormatParquet'
+ Format Format `json:"format,omitempty"`
+ // Schedule - This is a required field. This field is used to schedule an inventory formation. Possible values include: 'ScheduleDaily', 'ScheduleWeekly'
+ Schedule Schedule `json:"schedule,omitempty"`
+ // ObjectType - This is a required field. This field specifies the scope of the inventory created either at the blob or container level. Possible values include: 'ObjectTypeBlob', 'ObjectTypeContainer'
+ ObjectType ObjectType `json:"objectType,omitempty"`
+ // SchemaFields - This is a required field. This field specifies the fields and properties of the object to be included in the inventory. The Schema field value 'Name' is always required. The valid values for this field for the 'Blob' definition.objectType include 'Name, Creation-Time, Last-Modified, Content-Length, Content-MD5, BlobType, AccessTier, AccessTierChangeTime, AccessTierInferred, Tags, Expiry-Time, hdi_isfolder, Owner, Group, Permissions, Acl, Snapshot, VersionId, IsCurrentVersion, Metadata, LastAccessTime, Tags, Etag, ContentType, ContentEncoding, ContentLanguage, ContentCRC64, CacheControl, ContentDisposition, LeaseStatus, LeaseState, LeaseDuration, ServerEncrypted, Deleted, DeletionId, DeletedTime, RemainingRetentionDays, ImmutabilityPolicyUntilDate, ImmutabilityPolicyMode, LegalHold, CopyId, CopyStatus, CopySource, CopyProgress, CopyCompletionTime, CopyStatusDescription, CustomerProvidedKeySha256, RehydratePriority, ArchiveStatus, XmsBlobSequenceNumber, EncryptionScope, IncrementalCopy, TagCount'. For Blob object type schema field value 'DeletedTime' is applicable only for Hns enabled accounts. The valid values for 'Container' definition.objectType include 'Name, Last-Modified, Metadata, LeaseStatus, LeaseState, LeaseDuration, PublicAccess, HasImmutabilityPolicy, HasLegalHold, Etag, DefaultEncryptionScope, DenyEncryptionScopeOverride, ImmutableStorageWithVersioningEnabled, Deleted, Version, DeletedTime, RemainingRetentionDays'. Schema field values 'Expiry-Time, hdi_isfolder, Owner, Group, Permissions, Acl, DeletionId' are valid only for Hns enabled accounts. Schema field values 'Tags, TagCount' are only valid for Non-Hns accounts.
+ SchemaFields *[]string `json:"schemaFields,omitempty"`
+}
+
+// BlobInventoryPolicyFilter an object that defines the blob inventory rule filter conditions. For 'Blob'
+// definition.objectType all filter properties are applicable, 'blobTypes' is required and others are
+// optional. For 'Container' definition.objectType only prefixMatch is applicable and is optional.
+type BlobInventoryPolicyFilter struct {
+ // PrefixMatch - An array of strings with maximum 10 blob prefixes to be included in the inventory.
+ PrefixMatch *[]string `json:"prefixMatch,omitempty"`
+ // ExcludePrefix - An array of strings with maximum 10 blob prefixes to be excluded from the inventory.
+ ExcludePrefix *[]string `json:"excludePrefix,omitempty"`
+ // BlobTypes - An array of predefined enum values. Valid values include blockBlob, appendBlob, pageBlob. Hns accounts do not support pageBlobs. This field is required when definition.objectType property is set to 'Blob'.
+ BlobTypes *[]string `json:"blobTypes,omitempty"`
+ // IncludeBlobVersions - Includes blob versions in blob inventory when value is set to true. The definition.schemaFields values 'VersionId and IsCurrentVersion' are required if this property is set to true, else they must be excluded.
+ IncludeBlobVersions *bool `json:"includeBlobVersions,omitempty"`
+ // IncludeSnapshots - Includes blob snapshots in blob inventory when value is set to true. The definition.schemaFields value 'Snapshot' is required if this property is set to true, else it must be excluded.
+ IncludeSnapshots *bool `json:"includeSnapshots,omitempty"`
+ // IncludeDeleted - For 'Container' definition.objectType the definition.schemaFields must include 'Deleted, Version, DeletedTime and RemainingRetentionDays'. For 'Blob' definition.objectType and HNS enabled storage accounts the definition.schemaFields must include 'DeletionId, Deleted, DeletedTime and RemainingRetentionDays' and for Hns disabled accounts the definition.schemaFields must include 'Deleted and RemainingRetentionDays', else it must be excluded.
+ IncludeDeleted *bool `json:"includeDeleted,omitempty"`
+}
+
+// BlobInventoryPolicyProperties the storage account blob inventory policy properties.
+type BlobInventoryPolicyProperties struct {
+ // LastModifiedTime - READ-ONLY; Returns the last modified date and time of the blob inventory policy.
+ LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"`
+ // Policy - The storage account blob inventory policy object. It is composed of policy rules.
+ Policy *BlobInventoryPolicySchema `json:"policy,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for BlobInventoryPolicyProperties.
+func (bipp BlobInventoryPolicyProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if bipp.Policy != nil {
+ objectMap["policy"] = bipp.Policy
+ }
+ return json.Marshal(objectMap)
+}
+
+// BlobInventoryPolicyRule an object that wraps the blob inventory rule. Each rule is uniquely defined by
+// name.
+type BlobInventoryPolicyRule struct {
+ // Enabled - Rule is enabled when set to true.
+ Enabled *bool `json:"enabled,omitempty"`
+ // Name - A rule name can contain any combination of alpha numeric characters. Rule name is case-sensitive. It must be unique within a policy.
+ Name *string `json:"name,omitempty"`
+ // Destination - Container name where blob inventory files are stored. Must be pre-created.
+ Destination *string `json:"destination,omitempty"`
+ // Definition - An object that defines the blob inventory policy rule.
+ Definition *BlobInventoryPolicyDefinition `json:"definition,omitempty"`
+}
+
+// BlobInventoryPolicySchema the storage account blob inventory policy rules.
+type BlobInventoryPolicySchema struct {
+ // Enabled - Policy is enabled if set to true.
+ Enabled *bool `json:"enabled,omitempty"`
+ // Destination - READ-ONLY; Deprecated property from API version 2021-04-01 onwards; the required destination container name must be specified at the rule level 'policy.rule.destination'.
+ Destination *string `json:"destination,omitempty"`
+ // Type - The valid value is Inventory.
+ Type *string `json:"type,omitempty"`
+ // Rules - The storage account blob inventory policy rules. The rule is applied when it is enabled.
+ Rules *[]BlobInventoryPolicyRule `json:"rules,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for BlobInventoryPolicySchema.
+func (bips BlobInventoryPolicySchema) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if bips.Enabled != nil {
+ objectMap["enabled"] = bips.Enabled
+ }
+ if bips.Type != nil {
+ objectMap["type"] = bips.Type
+ }
+ if bips.Rules != nil {
+ objectMap["rules"] = bips.Rules
+ }
+ return json.Marshal(objectMap)
+}
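+
+// A minimal sketch assembling a single-rule inventory policy from the types
+// above; "inventory-dest" is a placeholder for a pre-created container name:
+//
+//   schema := BlobInventoryPolicySchema{
+//     Enabled: to.BoolPtr(true),
+//     Type:    to.StringPtr("Inventory"),
+//     Rules: &[]BlobInventoryPolicyRule{{
+//       Enabled:     to.BoolPtr(true),
+//       Name:        to.StringPtr("daily-blob-inventory"),
+//       Destination: to.StringPtr("inventory-dest"),
+//       Definition: &BlobInventoryPolicyDefinition{
+//         Format:       FormatCsv,
+//         Schedule:     ScheduleDaily,
+//         ObjectType:   ObjectTypeBlob,
+//         SchemaFields: &[]string{"Name", "Content-Length"},
+//         Filters:      &BlobInventoryPolicyFilter{BlobTypes: &[]string{"blockBlob"}},
+//       },
+//     }},
+//   }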
+
+// BlobRestoreParameters blob restore parameters
+type BlobRestoreParameters struct {
+ // TimeToRestore - Restore blob to the specified time.
+ TimeToRestore *date.Time `json:"timeToRestore,omitempty"`
+ // BlobRanges - Blob ranges to restore.
+ BlobRanges *[]BlobRestoreRange `json:"blobRanges,omitempty"`
+}
+
+// BlobRestoreRange blob range
+type BlobRestoreRange struct {
+ // StartRange - Blob start range. This is inclusive. Empty means account start.
+ StartRange *string `json:"startRange,omitempty"`
+ // EndRange - Blob end range. This is exclusive. Empty means account end.
+ EndRange *string `json:"endRange,omitempty"`
+}
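+
+// A sketch of restoring the whole account to a point in time: per the field
+// comments above, empty start and end ranges span everything.
+//
+//   t := date.Time{Time: time.Now().Add(-24 * time.Hour)}
+//   restore := BlobRestoreParameters{
+//     TimeToRestore: &t,
+//     BlobRanges:    &[]BlobRestoreRange{{StartRange: to.StringPtr(""), EndRange: to.StringPtr("")}},
+//   }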
+
+// BlobRestoreStatus blob restore status.
+type BlobRestoreStatus struct {
+ autorest.Response `json:"-"`
+ // Status - READ-ONLY; The status of blob restore progress. Possible values are: - InProgress: Indicates that blob restore is ongoing. - Complete: Indicates that blob restore has been completed successfully. - Failed: Indicates that blob restore has failed. Possible values include: 'BlobRestoreProgressStatusInProgress', 'BlobRestoreProgressStatusComplete', 'BlobRestoreProgressStatusFailed'
+ Status BlobRestoreProgressStatus `json:"status,omitempty"`
+ // FailureReason - READ-ONLY; Failure reason when blob restore failed.
+ FailureReason *string `json:"failureReason,omitempty"`
+ // RestoreID - READ-ONLY; Id for tracking blob restore request.
+ RestoreID *string `json:"restoreId,omitempty"`
+ // Parameters - READ-ONLY; Blob restore request parameters.
+ Parameters *BlobRestoreParameters `json:"parameters,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for BlobRestoreStatus.
+func (brs BlobRestoreStatus) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// BlobServiceItems ...
+type BlobServiceItems struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; List of blob services returned.
+ Value *[]BlobServiceProperties `json:"value,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for BlobServiceItems.
+func (bsi BlobServiceItems) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// BlobServiceProperties the properties of a storage account’s Blob service.
+type BlobServiceProperties struct {
+ autorest.Response `json:"-"`
+ // BlobServicePropertiesProperties - The properties of a storage account’s Blob service.
+ *BlobServicePropertiesProperties `json:"properties,omitempty"`
+ // Sku - READ-ONLY; Sku name and tier.
+ Sku *Sku `json:"sku,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for BlobServiceProperties.
+func (bsp BlobServiceProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if bsp.BlobServicePropertiesProperties != nil {
+ objectMap["properties"] = bsp.BlobServicePropertiesProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for BlobServiceProperties struct.
+func (bsp *BlobServiceProperties) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var blobServiceProperties BlobServicePropertiesProperties
+ err = json.Unmarshal(*v, &blobServiceProperties)
+ if err != nil {
+ return err
+ }
+ bsp.BlobServicePropertiesProperties = &blobServiceProperties
+ }
+ case "sku":
+ if v != nil {
+ var sku Sku
+ err = json.Unmarshal(*v, &sku)
+ if err != nil {
+ return err
+ }
+ bsp.Sku = &sku
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ bsp.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ bsp.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ bsp.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// BlobServicePropertiesProperties the properties of a storage account’s Blob service.
+type BlobServicePropertiesProperties struct {
+ // Cors - Specifies CORS rules for the Blob service. You can include up to five CorsRule elements in the request. If no CorsRule elements are included in the request body, all CORS rules will be deleted, and CORS will be disabled for the Blob service.
+ Cors *CorsRules `json:"cors,omitempty"`
+ // DefaultServiceVersion - DefaultServiceVersion indicates the default version to use for requests to the Blob service if an incoming request’s version is not specified. Possible values include version 2008-10-27 and all more recent versions.
+ DefaultServiceVersion *string `json:"defaultServiceVersion,omitempty"`
+ // DeleteRetentionPolicy - The blob service properties for blob soft delete.
+ DeleteRetentionPolicy *DeleteRetentionPolicy `json:"deleteRetentionPolicy,omitempty"`
+ // IsVersioningEnabled - Versioning is enabled if set to true.
+ IsVersioningEnabled *bool `json:"isVersioningEnabled,omitempty"`
+ // AutomaticSnapshotPolicyEnabled - Deprecated in favor of isVersioningEnabled property.
+ AutomaticSnapshotPolicyEnabled *bool `json:"automaticSnapshotPolicyEnabled,omitempty"`
+ // ChangeFeed - The blob service properties for change feed events.
+ ChangeFeed *ChangeFeed `json:"changeFeed,omitempty"`
+ // RestorePolicy - The blob service properties for blob restore policy.
+ RestorePolicy *RestorePolicyProperties `json:"restorePolicy,omitempty"`
+ // ContainerDeleteRetentionPolicy - The blob service properties for container soft delete.
+ ContainerDeleteRetentionPolicy *DeleteRetentionPolicy `json:"containerDeleteRetentionPolicy,omitempty"`
+ // LastAccessTimeTrackingPolicy - The blob service property to configure last access time based tracking policy.
+ LastAccessTimeTrackingPolicy *LastAccessTimeTrackingPolicy `json:"lastAccessTimeTrackingPolicy,omitempty"`
+}
+
+// ChangeFeed the blob service properties for change feed events.
+type ChangeFeed struct {
+ // Enabled - Indicates whether change feed event logging is enabled for the Blob service.
+ Enabled *bool `json:"enabled,omitempty"`
+ // RetentionInDays - Indicates the duration of changeFeed retention in days. Minimum value is 1 day and maximum value is 146000 days (400 years). A null value indicates an infinite retention of the change feed.
+ RetentionInDays *int32 `json:"retentionInDays,omitempty"`
+}
+
+// CheckNameAvailabilityResult the CheckNameAvailability operation response.
+type CheckNameAvailabilityResult struct {
+ autorest.Response `json:"-"`
+ // NameAvailable - READ-ONLY; Gets a boolean value that indicates whether the name is available for you to use. If true, the name is available. If false, the name has already been taken or is invalid and cannot be used.
+ NameAvailable *bool `json:"nameAvailable,omitempty"`
+ // Reason - READ-ONLY; Gets the reason that a storage account name could not be used. The Reason element is only returned if NameAvailable is false. Possible values include: 'ReasonAccountNameInvalid', 'ReasonAlreadyExists'
+ Reason Reason `json:"reason,omitempty"`
+ // Message - READ-ONLY; Gets an error message explaining the Reason value in more detail.
+ Message *string `json:"message,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for CheckNameAvailabilityResult.
+func (cnar CheckNameAvailabilityResult) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// CloudError an error response from the Storage service.
+type CloudError struct {
+ Error *CloudErrorBody `json:"error,omitempty"`
+}
+
+// CloudErrorBody an error response from the Storage service.
+type CloudErrorBody struct {
+ // Code - An identifier for the error. Codes are invariant and are intended to be consumed programmatically.
+ Code *string `json:"code,omitempty"`
+ // Message - A message describing the error, intended to be suitable for display in a user interface.
+ Message *string `json:"message,omitempty"`
+ // Target - The target of the particular error. For example, the name of the property in error.
+ Target *string `json:"target,omitempty"`
+ // Details - A list of additional details about the error.
+ Details *[]CloudErrorBody `json:"details,omitempty"`
+}
+
+// ContainerProperties the properties of a container.
+type ContainerProperties struct {
+ // Version - READ-ONLY; The version of the deleted blob container.
+ Version *string `json:"version,omitempty"`
+ // Deleted - READ-ONLY; Indicates whether the blob container was deleted.
+ Deleted *bool `json:"deleted,omitempty"`
+ // DeletedTime - READ-ONLY; Blob container deletion time.
+ DeletedTime *date.Time `json:"deletedTime,omitempty"`
+ // RemainingRetentionDays - READ-ONLY; Remaining retention days for soft deleted blob container.
+ RemainingRetentionDays *int32 `json:"remainingRetentionDays,omitempty"`
+ // DefaultEncryptionScope - Default the container to use specified encryption scope for all writes.
+ DefaultEncryptionScope *string `json:"defaultEncryptionScope,omitempty"`
+ // DenyEncryptionScopeOverride - Block override of encryption scope from the container default.
+ DenyEncryptionScopeOverride *bool `json:"denyEncryptionScopeOverride,omitempty"`
+ // PublicAccess - Specifies whether data in the container may be accessed publicly and the level of access. Possible values include: 'PublicAccessContainer', 'PublicAccessBlob', 'PublicAccessNone'
+ PublicAccess PublicAccess `json:"publicAccess,omitempty"`
+ // LastModifiedTime - READ-ONLY; Returns the date and time the container was last modified.
+ LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"`
+ // LeaseStatus - READ-ONLY; The lease status of the container. Possible values include: 'LeaseStatusLocked', 'LeaseStatusUnlocked'
+ LeaseStatus LeaseStatus `json:"leaseStatus,omitempty"`
+ // LeaseState - READ-ONLY; Lease state of the container. Possible values include: 'LeaseStateAvailable', 'LeaseStateLeased', 'LeaseStateExpired', 'LeaseStateBreaking', 'LeaseStateBroken'
+ LeaseState LeaseState `json:"leaseState,omitempty"`
+ // LeaseDuration - READ-ONLY; Specifies whether the lease on a container is of infinite or fixed duration, only when the container is leased. Possible values include: 'LeaseDurationInfinite', 'LeaseDurationFixed'
+ LeaseDuration LeaseDuration `json:"leaseDuration,omitempty"`
+ // Metadata - A name-value pair to associate with the container as metadata.
+ Metadata map[string]*string `json:"metadata"`
+ // ImmutabilityPolicy - READ-ONLY; The ImmutabilityPolicy property of the container.
+ ImmutabilityPolicy *ImmutabilityPolicyProperties `json:"immutabilityPolicy,omitempty"`
+ // LegalHold - READ-ONLY; The LegalHold property of the container.
+ LegalHold *LegalHoldProperties `json:"legalHold,omitempty"`
+ // HasLegalHold - READ-ONLY; The hasLegalHold public property is set to true by SRP if there is at least one existing tag. The hasLegalHold public property is set to false by SRP if all existing legal hold tags are cleared out. There can be a maximum of 1000 blob containers with hasLegalHold=true for a given account.
+ HasLegalHold *bool `json:"hasLegalHold,omitempty"`
+ // HasImmutabilityPolicy - READ-ONLY; The hasImmutabilityPolicy public property is set to true by SRP if ImmutabilityPolicy has been created for this container. The hasImmutabilityPolicy public property is set to false by SRP if ImmutabilityPolicy has not been created for this container.
+ HasImmutabilityPolicy *bool `json:"hasImmutabilityPolicy,omitempty"`
+ // ImmutableStorageWithVersioning - The object level immutability property of the container. The property is immutable and can only be set to true at the container creation time. Existing containers must undergo a migration process.
+ ImmutableStorageWithVersioning *ImmutableStorageWithVersioning `json:"immutableStorageWithVersioning,omitempty"`
+ // EnableNfsV3RootSquash - Enable NFSv3 root squash on blob container.
+ EnableNfsV3RootSquash *bool `json:"enableNfsV3RootSquash,omitempty"`
+ // EnableNfsV3AllSquash - Enable NFSv3 all squash on blob container.
+ EnableNfsV3AllSquash *bool `json:"enableNfsV3AllSquash,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ContainerProperties.
+func (cp ContainerProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if cp.DefaultEncryptionScope != nil {
+ objectMap["defaultEncryptionScope"] = cp.DefaultEncryptionScope
+ }
+ if cp.DenyEncryptionScopeOverride != nil {
+ objectMap["denyEncryptionScopeOverride"] = cp.DenyEncryptionScopeOverride
+ }
+ if cp.PublicAccess != "" {
+ objectMap["publicAccess"] = cp.PublicAccess
+ }
+ if cp.Metadata != nil {
+ objectMap["metadata"] = cp.Metadata
+ }
+ if cp.ImmutableStorageWithVersioning != nil {
+ objectMap["immutableStorageWithVersioning"] = cp.ImmutableStorageWithVersioning
+ }
+ if cp.EnableNfsV3RootSquash != nil {
+ objectMap["enableNfsV3RootSquash"] = cp.EnableNfsV3RootSquash
+ }
+ if cp.EnableNfsV3AllSquash != nil {
+ objectMap["enableNfsV3AllSquash"] = cp.EnableNfsV3AllSquash
+ }
+ return json.Marshal(objectMap)
+}
+
+// CorsRule specifies a CORS rule for the Blob service.
+type CorsRule struct {
+ // AllowedOrigins - Required if CorsRule element is present. A list of origin domains that will be allowed via CORS, or "*" to allow all domains
+ AllowedOrigins *[]string `json:"allowedOrigins,omitempty"`
+ // AllowedMethods - Required if CorsRule element is present. A list of HTTP methods that are allowed to be executed by the origin.
+ AllowedMethods *[]string `json:"allowedMethods,omitempty"`
+ // MaxAgeInSeconds - Required if CorsRule element is present. The number of seconds that the client/browser should cache a preflight response.
+ MaxAgeInSeconds *int32 `json:"maxAgeInSeconds,omitempty"`
+ // ExposedHeaders - Required if CorsRule element is present. A list of response headers to expose to CORS clients.
+ ExposedHeaders *[]string `json:"exposedHeaders,omitempty"`
+ // AllowedHeaders - Required if CorsRule element is present. A list of headers allowed to be part of the cross-origin request.
+ AllowedHeaders *[]string `json:"allowedHeaders,omitempty"`
+}
+
+// CorsRules sets the CORS rules. You can include up to five CorsRule elements in the request.
+type CorsRules struct {
+ // CorsRules - The List of CORS rules. You can include up to five CorsRule elements in the request.
+ CorsRules *[]CorsRule `json:"corsRules,omitempty"`
+}
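+
+// A minimal sketch of a permissive read-only CORS configuration using the two
+// types above:
+//
+//   cors := CorsRules{CorsRules: &[]CorsRule{{
+//     AllowedOrigins:  &[]string{"*"},
+//     AllowedMethods:  &[]string{"GET", "HEAD"},
+//     AllowedHeaders:  &[]string{"*"},
+//     ExposedHeaders:  &[]string{"*"},
+//     MaxAgeInSeconds: to.Int32Ptr(3600),
+//   }}}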
+
+// CustomDomain the custom domain assigned to this storage account. This can be set via Update.
+type CustomDomain struct {
+ // Name - Gets or sets the custom domain name assigned to the storage account. Name is the CNAME source.
+ Name *string `json:"name,omitempty"`
+ // UseSubDomainName - Indicates whether indirect CName validation is enabled. Default value is false. This should only be set on updates.
+ UseSubDomainName *bool `json:"useSubDomainName,omitempty"`
+}
+
+// DateAfterCreation object to define snapshot and version action conditions.
+type DateAfterCreation struct {
+ // DaysAfterCreationGreaterThan - Value indicating the age in days after creation
+ DaysAfterCreationGreaterThan *float64 `json:"daysAfterCreationGreaterThan,omitempty"`
+ // DaysAfterLastTierChangeGreaterThan - Value indicating the age in days after last blob tier change time. This property is only applicable for tierToArchive actions and requires daysAfterCreationGreaterThan to be set for snapshots and blob version based actions. The blob will be archived if both the conditions are satisfied.
+ DaysAfterLastTierChangeGreaterThan *float64 `json:"daysAfterLastTierChangeGreaterThan,omitempty"`
+}
+
+// DateAfterModification object to define the base blob action conditions. Properties
+// daysAfterModificationGreaterThan, daysAfterLastAccessTimeGreaterThan and daysAfterCreationGreaterThan
+// are mutually exclusive. The daysAfterLastTierChangeGreaterThan property is only applicable for
+// tierToArchive actions, which require daysAfterModificationGreaterThan to be set; it cannot be used
+// in conjunction with daysAfterLastAccessTimeGreaterThan or daysAfterCreationGreaterThan.
+type DateAfterModification struct {
+ // DaysAfterModificationGreaterThan - Value indicating the age in days after last modification
+ DaysAfterModificationGreaterThan *float64 `json:"daysAfterModificationGreaterThan,omitempty"`
+ // DaysAfterLastAccessTimeGreaterThan - Value indicating the age in days after last blob access. This property can only be used in conjunction with last access time tracking policy
+ DaysAfterLastAccessTimeGreaterThan *float64 `json:"daysAfterLastAccessTimeGreaterThan,omitempty"`
+ // DaysAfterLastTierChangeGreaterThan - Value indicating the age in days after last blob tier change time. This property is only applicable for tierToArchive actions and requires daysAfterModificationGreaterThan to be set for baseBlobs based actions. The blob will be archived if both the conditions are satisfied.
+ DaysAfterLastTierChangeGreaterThan *float64 `json:"daysAfterLastTierChangeGreaterThan,omitempty"`
+ // DaysAfterCreationGreaterThan - Value indicating the age in days after blob creation.
+ DaysAfterCreationGreaterThan *float64 `json:"daysAfterCreationGreaterThan,omitempty"`
+}
+
+// DeletedAccount deleted storage account
+type DeletedAccount struct {
+ autorest.Response `json:"-"`
+ // DeletedAccountProperties - Properties of the deleted account.
+ *DeletedAccountProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for DeletedAccount.
+func (da DeletedAccount) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if da.DeletedAccountProperties != nil {
+ objectMap["properties"] = da.DeletedAccountProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for DeletedAccount struct.
+func (da *DeletedAccount) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var deletedAccountProperties DeletedAccountProperties
+ err = json.Unmarshal(*v, &deletedAccountProperties)
+ if err != nil {
+ return err
+ }
+ da.DeletedAccountProperties = &deletedAccountProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ da.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ da.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ da.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// DeletedAccountListResult the response from the List Deleted Accounts operation.
+type DeletedAccountListResult struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; Gets the list of deleted accounts and their properties.
+ Value *[]DeletedAccount `json:"value,omitempty"`
+ // NextLink - READ-ONLY; Request URL that can be used to query the next page of deleted accounts. Returned when the total number of requested deleted accounts exceeds the maximum page size.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for DeletedAccountListResult.
+func (dalr DeletedAccountListResult) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// DeletedAccountListResultIterator provides access to a complete listing of DeletedAccount values.
+type DeletedAccountListResultIterator struct {
+ i int
+ page DeletedAccountListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *DeletedAccountListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DeletedAccountListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *DeletedAccountListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter DeletedAccountListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter DeletedAccountListResultIterator) Response() DeletedAccountListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter DeletedAccountListResultIterator) Value() DeletedAccount {
+ if !iter.page.NotDone() {
+ return DeletedAccount{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewDeletedAccountListResultIterator creates a new instance of the DeletedAccountListResultIterator type.
+func NewDeletedAccountListResultIterator(page DeletedAccountListResultPage) DeletedAccountListResultIterator {
+ return DeletedAccountListResultIterator{page: page}
+}
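+
+// A sketch of the idiomatic consumption loop for this iterator; the same
+// pattern applies to every generated ...Iterator type in this package:
+//
+//   for iter.NotDone() {
+//     account := iter.Value()
+//     _ = account
+//     if err := iter.NextWithContext(ctx); err != nil {
+//       return err
+//     }
+//   }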
+
+// IsEmpty returns true if the ListResult contains no values.
+func (dalr DeletedAccountListResult) IsEmpty() bool {
+ return dalr.Value == nil || len(*dalr.Value) == 0
+}
+
+// hasNextLink returns true if the NextLink is not empty.
+func (dalr DeletedAccountListResult) hasNextLink() bool {
+ return dalr.NextLink != nil && len(*dalr.NextLink) != 0
+}
+
+// deletedAccountListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (dalr DeletedAccountListResult) deletedAccountListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if !dalr.hasNextLink() {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(dalr.NextLink)))
+}
+
+// DeletedAccountListResultPage contains a page of DeletedAccount values.
+type DeletedAccountListResultPage struct {
+ fn func(context.Context, DeletedAccountListResult) (DeletedAccountListResult, error)
+ dalr DeletedAccountListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *DeletedAccountListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DeletedAccountListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ for {
+ next, err := page.fn(ctx, page.dalr)
+ if err != nil {
+ return err
+ }
+ page.dalr = next
+ if !next.hasNextLink() || !next.IsEmpty() {
+ break
+ }
+ }
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *DeletedAccountListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page DeletedAccountListResultPage) NotDone() bool {
+ return !page.dalr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page DeletedAccountListResultPage) Response() DeletedAccountListResult {
+ return page.dalr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page DeletedAccountListResultPage) Values() []DeletedAccount {
+ if page.dalr.IsEmpty() {
+ return nil
+ }
+ return *page.dalr.Value
+}
+
+// NewDeletedAccountListResultPage creates a new instance of the DeletedAccountListResultPage type.
+func NewDeletedAccountListResultPage(cur DeletedAccountListResult, getNextPage func(context.Context, DeletedAccountListResult) (DeletedAccountListResult, error)) DeletedAccountListResultPage {
+ return DeletedAccountListResultPage{
+ fn: getNextPage,
+ dalr: cur,
+ }
+}
+
+// DeletedAccountProperties attributes of a deleted storage account.
+type DeletedAccountProperties struct {
+ // StorageAccountResourceID - READ-ONLY; Full resource id of the original storage account.
+ StorageAccountResourceID *string `json:"storageAccountResourceId,omitempty"`
+ // Location - READ-ONLY; Location of the deleted account.
+ Location *string `json:"location,omitempty"`
+ // RestoreReference - READ-ONLY; Can be used to attempt recovering this deleted account via the PutStorageAccount API.
+ RestoreReference *string `json:"restoreReference,omitempty"`
+ // CreationTime - READ-ONLY; Creation time of the deleted account.
+ CreationTime *string `json:"creationTime,omitempty"`
+ // DeletionTime - READ-ONLY; Deletion time of the deleted account.
+ DeletionTime *string `json:"deletionTime,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for DeletedAccountProperties.
+func (dap DeletedAccountProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// DeletedShare the deleted share to be restored.
+type DeletedShare struct {
+ // DeletedShareName - Required. Identify the name of the deleted share that will be restored.
+ DeletedShareName *string `json:"deletedShareName,omitempty"`
+ // DeletedShareVersion - Required. Identify the version of the deleted share that will be restored.
+ DeletedShareVersion *string `json:"deletedShareVersion,omitempty"`
+}
+
+// DeleteRetentionPolicy the service properties for soft delete.
+type DeleteRetentionPolicy struct {
+ // Enabled - Indicates whether DeleteRetentionPolicy is enabled.
+ Enabled *bool `json:"enabled,omitempty"`
+ // Days - Indicates the number of days that the deleted item should be retained. The minimum specified value can be 1 and the maximum value can be 365.
+ Days *int32 `json:"days,omitempty"`
+ // AllowPermanentDelete - This property when set to true allows deletion of the soft deleted blob versions and snapshots. This property cannot be used with blob restore policy. This property only applies to the blob service and does not apply to containers or file shares.
+ AllowPermanentDelete *bool `json:"allowPermanentDelete,omitempty"`
+}
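+
+// exampleSoftDeletePolicy is an illustrative sketch (not generated code)
+// showing the autorest/to pointer helpers used to populate the optional
+// fields above; the 7-day window is an arbitrary example, not a default.
+func exampleSoftDeletePolicy() DeleteRetentionPolicy {
+ return DeleteRetentionPolicy{
+ Enabled: to.BoolPtr(true),
+ Days: to.Int32Ptr(7), // allowed range is 1 to 365
+ }
+}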
+
+// Dimension dimension of blobs, possibly blob type or access tier.
+type Dimension struct {
+ // Name - Name of the dimension.
+ Name *string `json:"name,omitempty"`
+ // DisplayName - Display name of dimension.
+ DisplayName *string `json:"displayName,omitempty"`
+}
+
+// Encryption the encryption settings on the storage account.
+type Encryption struct {
+ // Services - List of services which support encryption.
+ Services *EncryptionServices `json:"services,omitempty"`
+ // KeySource - The encryption keySource (provider). Possible values (case-insensitive): Microsoft.Storage, Microsoft.Keyvault. Possible values include: 'KeySourceMicrosoftStorage', 'KeySourceMicrosoftKeyvault'
+ KeySource KeySource `json:"keySource,omitempty"`
+ // RequireInfrastructureEncryption - A boolean indicating whether or not the service applies a secondary layer of encryption with platform managed keys for data at rest.
+ RequireInfrastructureEncryption *bool `json:"requireInfrastructureEncryption,omitempty"`
+ // KeyVaultProperties - Properties provided by key vault.
+ KeyVaultProperties *KeyVaultProperties `json:"keyvaultproperties,omitempty"`
+ // EncryptionIdentity - The identity to be used with service-side encryption at rest.
+ EncryptionIdentity *EncryptionIdentity `json:"identity,omitempty"`
+}
+
+// EncryptionIdentity encryption identity for the storage account.
+type EncryptionIdentity struct {
+ // EncryptionUserAssignedIdentity - Resource identifier of the UserAssigned identity to be associated with server-side encryption on the storage account.
+ EncryptionUserAssignedIdentity *string `json:"userAssignedIdentity,omitempty"`
+ // EncryptionFederatedIdentityClientID - ClientId of the multi-tenant application to be used in conjunction with the user-assigned identity for cross-tenant customer-managed-keys server-side encryption on the storage account.
+ EncryptionFederatedIdentityClientID *string `json:"federatedIdentityClientId,omitempty"`
+}
+
+// EncryptionScope the Encryption Scope resource.
+type EncryptionScope struct {
+ autorest.Response `json:"-"`
+ // EncryptionScopeProperties - Properties of the encryption scope.
+ *EncryptionScopeProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for EncryptionScope.
+func (es EncryptionScope) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if es.EncryptionScopeProperties != nil {
+ objectMap["properties"] = es.EncryptionScopeProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for EncryptionScope struct.
+func (es *EncryptionScope) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var encryptionScopeProperties EncryptionScopeProperties
+ err = json.Unmarshal(*v, &encryptionScopeProperties)
+ if err != nil {
+ return err
+ }
+ es.EncryptionScopeProperties = &encryptionScopeProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ es.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ es.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ es.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
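+
+// The custom marshaler above deliberately drops the READ-ONLY ID, Name and
+// Type fields so that a struct populated from a GET response can be sent
+// back in a PUT/PATCH body. A minimal round-trip sketch (illustrative, not
+// part of the generated API):
+func exampleEncryptionScopeRoundTrip(es EncryptionScope) (EncryptionScope, error) {
+ body, err := json.Marshal(es) // emits only the "properties" envelope
+ if err != nil {
+ return EncryptionScope{}, err
+ }
+ var out EncryptionScope
+ err = out.UnmarshalJSON(body) // flattens "properties" back onto the struct
+ return out, err
+}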
+
+// EncryptionScopeKeyVaultProperties the key vault properties for the encryption scope. This is a required
+// field if encryption scope 'source' attribute is set to 'Microsoft.KeyVault'.
+type EncryptionScopeKeyVaultProperties struct {
+ // KeyURI - The object identifier for a key vault key object. When applied, the encryption scope will use the key referenced by the identifier to enable customer-managed key support on this encryption scope.
+ KeyURI *string `json:"keyUri,omitempty"`
+ // CurrentVersionedKeyIdentifier - READ-ONLY; The object identifier of the current versioned Key Vault Key in use.
+ CurrentVersionedKeyIdentifier *string `json:"currentVersionedKeyIdentifier,omitempty"`
+ // LastKeyRotationTimestamp - READ-ONLY; Timestamp of last rotation of the Key Vault Key.
+ LastKeyRotationTimestamp *date.Time `json:"lastKeyRotationTimestamp,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for EncryptionScopeKeyVaultProperties.
+func (eskvp EncryptionScopeKeyVaultProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if eskvp.KeyURI != nil {
+ objectMap["keyUri"] = eskvp.KeyURI
+ }
+ return json.Marshal(objectMap)
+}
+
+// EncryptionScopeListResult list of encryption scopes requested, and if paging is required, a URL to the
+// next page of encryption scopes.
+type EncryptionScopeListResult struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; List of encryption scopes requested.
+ Value *[]EncryptionScope `json:"value,omitempty"`
+ // NextLink - READ-ONLY; Request URL that can be used to query the next page of encryption scopes. Returned when the total number of requested encryption scopes exceeds the maximum page size.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for EncryptionScopeListResult.
+func (eslr EncryptionScopeListResult) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// EncryptionScopeListResultIterator provides access to a complete listing of EncryptionScope values.
+type EncryptionScopeListResultIterator struct {
+ i int
+ page EncryptionScopeListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *EncryptionScopeListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/EncryptionScopeListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *EncryptionScopeListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter EncryptionScopeListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter EncryptionScopeListResultIterator) Response() EncryptionScopeListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter EncryptionScopeListResultIterator) Value() EncryptionScope {
+ if !iter.page.NotDone() {
+ return EncryptionScope{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewEncryptionScopeListResultIterator creates a new instance of the EncryptionScopeListResultIterator type.
+func NewEncryptionScopeListResultIterator(page EncryptionScopeListResultPage) EncryptionScopeListResultIterator {
+ return EncryptionScopeListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (eslr EncryptionScopeListResult) IsEmpty() bool {
+ return eslr.Value == nil || len(*eslr.Value) == 0
+}
+
+// hasNextLink returns true if the NextLink is not empty.
+func (eslr EncryptionScopeListResult) hasNextLink() bool {
+ return eslr.NextLink != nil && len(*eslr.NextLink) != 0
+}
+
+// encryptionScopeListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (eslr EncryptionScopeListResult) encryptionScopeListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if !eslr.hasNextLink() {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(eslr.NextLink)))
+}
+
+// EncryptionScopeListResultPage contains a page of EncryptionScope values.
+type EncryptionScopeListResultPage struct {
+ fn func(context.Context, EncryptionScopeListResult) (EncryptionScopeListResult, error)
+ eslr EncryptionScopeListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *EncryptionScopeListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/EncryptionScopeListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ for {
+ next, err := page.fn(ctx, page.eslr)
+ if err != nil {
+ return err
+ }
+ page.eslr = next
+ if !next.hasNextLink() || !next.IsEmpty() {
+ break
+ }
+ }
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *EncryptionScopeListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page EncryptionScopeListResultPage) NotDone() bool {
+ return !page.eslr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page EncryptionScopeListResultPage) Response() EncryptionScopeListResult {
+ return page.eslr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page EncryptionScopeListResultPage) Values() []EncryptionScope {
+ if page.eslr.IsEmpty() {
+ return nil
+ }
+ return *page.eslr.Value
+}
+
+// NewEncryptionScopeListResultPage creates a new instance of the EncryptionScopeListResultPage type.
+func NewEncryptionScopeListResultPage(cur EncryptionScopeListResult, getNextPage func(context.Context, EncryptionScopeListResult) (EncryptionScopeListResult, error)) EncryptionScopeListResultPage {
+ return EncryptionScopeListResultPage{
+ fn: getNextPage,
+ eslr: cur,
+ }
+}
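+
+// Page-level counterpart to the item iterator: an illustrative sketch (the
+// first page would come from a caller such as a hypothetical
+// EncryptionScopesClient.List) of walking whole pages at a time.
+func exampleWalkEncryptionScopePages(ctx context.Context, page EncryptionScopeListResultPage) error {
+ for page.NotDone() {
+ for _, scope := range page.Values() {
+ _ = scope // process each EncryptionScope here
+ }
+ if err := page.NextWithContext(ctx); err != nil {
+ return err
+ }
+ }
+ return nil
+}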
+
+// EncryptionScopeProperties properties of the encryption scope.
+type EncryptionScopeProperties struct {
+ // Source - The provider for the encryption scope. Possible values (case-insensitive): Microsoft.Storage, Microsoft.KeyVault. Possible values include: 'EncryptionScopeSourceMicrosoftStorage', 'EncryptionScopeSourceMicrosoftKeyVault'
+ Source EncryptionScopeSource `json:"source,omitempty"`
+ // State - The state of the encryption scope. Possible values (case-insensitive): Enabled, Disabled. Possible values include: 'EncryptionScopeStateEnabled', 'EncryptionScopeStateDisabled'
+ State EncryptionScopeState `json:"state,omitempty"`
+ // CreationTime - READ-ONLY; Gets the creation date and time of the encryption scope in UTC.
+ CreationTime *date.Time `json:"creationTime,omitempty"`
+ // LastModifiedTime - READ-ONLY; Gets the last modification date and time of the encryption scope in UTC.
+ LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"`
+ // KeyVaultProperties - The key vault properties for the encryption scope. This is a required field if encryption scope 'source' attribute is set to 'Microsoft.KeyVault'.
+ KeyVaultProperties *EncryptionScopeKeyVaultProperties `json:"keyVaultProperties,omitempty"`
+ // RequireInfrastructureEncryption - A boolean indicating whether or not the service applies a secondary layer of encryption with platform managed keys for data at rest.
+ RequireInfrastructureEncryption *bool `json:"requireInfrastructureEncryption,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for EncryptionScopeProperties.
+func (esp EncryptionScopeProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if esp.Source != "" {
+ objectMap["source"] = esp.Source
+ }
+ if esp.State != "" {
+ objectMap["state"] = esp.State
+ }
+ if esp.KeyVaultProperties != nil {
+ objectMap["keyVaultProperties"] = esp.KeyVaultProperties
+ }
+ if esp.RequireInfrastructureEncryption != nil {
+ objectMap["requireInfrastructureEncryption"] = esp.RequireInfrastructureEncryption
+ }
+ return json.Marshal(objectMap)
+}
+
+// EncryptionService a service that allows server-side encryption to be used.
+type EncryptionService struct {
+ // Enabled - A boolean indicating whether or not the service encrypts the data as it is stored. Encryption at rest is enabled by default today and cannot be disabled.
+ Enabled *bool `json:"enabled,omitempty"`
+ // LastEnabledTime - READ-ONLY; Gets a rough estimate of the date/time when the encryption was last enabled by the user. Data is encrypted at rest by default today and cannot be disabled.
+ LastEnabledTime *date.Time `json:"lastEnabledTime,omitempty"`
+ // KeyType - Encryption key type to be used for the encryption service. 'Account' key type implies that an account-scoped encryption key will be used. 'Service' key type implies that a default service key is used. Possible values include: 'KeyTypeService', 'KeyTypeAccount'
+ KeyType KeyType `json:"keyType,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for EncryptionService.
+func (es EncryptionService) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if es.Enabled != nil {
+ objectMap["enabled"] = es.Enabled
+ }
+ if es.KeyType != "" {
+ objectMap["keyType"] = es.KeyType
+ }
+ return json.Marshal(objectMap)
+}
+
+// EncryptionServices a list of services that support encryption.
+type EncryptionServices struct {
+ // Blob - The encryption function of the blob storage service.
+ Blob *EncryptionService `json:"blob,omitempty"`
+ // File - The encryption function of the file storage service.
+ File *EncryptionService `json:"file,omitempty"`
+ // Table - The encryption function of the table storage service.
+ Table *EncryptionService `json:"table,omitempty"`
+ // Queue - The encryption function of the queue storage service.
+ Queue *EncryptionService `json:"queue,omitempty"`
+}
+
+// Endpoints the URIs that are used to retrieve a public blob, queue, table, web or dfs object.
+type Endpoints struct {
+ // Blob - READ-ONLY; Gets the blob endpoint.
+ Blob *string `json:"blob,omitempty"`
+ // Queue - READ-ONLY; Gets the queue endpoint.
+ Queue *string `json:"queue,omitempty"`
+ // Table - READ-ONLY; Gets the table endpoint.
+ Table *string `json:"table,omitempty"`
+ // File - READ-ONLY; Gets the file endpoint.
+ File *string `json:"file,omitempty"`
+ // Web - READ-ONLY; Gets the web endpoint.
+ Web *string `json:"web,omitempty"`
+ // Dfs - READ-ONLY; Gets the dfs endpoint.
+ Dfs *string `json:"dfs,omitempty"`
+ // MicrosoftEndpoints - Gets the Microsoft routing storage endpoints.
+ MicrosoftEndpoints *AccountMicrosoftEndpoints `json:"microsoftEndpoints,omitempty"`
+ // InternetEndpoints - Gets the internet routing storage endpoints.
+ InternetEndpoints *AccountInternetEndpoints `json:"internetEndpoints,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for Endpoints.
+func (e Endpoints) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if e.MicrosoftEndpoints != nil {
+ objectMap["microsoftEndpoints"] = e.MicrosoftEndpoints
+ }
+ if e.InternetEndpoints != nil {
+ objectMap["internetEndpoints"] = e.InternetEndpoints
+ }
+ return json.Marshal(objectMap)
+}
+
+// ErrorResponse an error response from the storage resource provider.
+type ErrorResponse struct {
+ // Error - Azure Storage Resource Provider error response body.
+ Error *ErrorResponseBody `json:"error,omitempty"`
+}
+
+// ErrorResponseBody error response body contract.
+type ErrorResponseBody struct {
+ // Code - An identifier for the error. Codes are invariant and are intended to be consumed programmatically.
+ Code *string `json:"code,omitempty"`
+ // Message - A message describing the error, intended to be suitable for display in a user interface.
+ Message *string `json:"message,omitempty"`
+}
+
+// ExtendedLocation the complex type of the extended location.
+type ExtendedLocation struct {
+ // Name - The name of the extended location.
+ Name *string `json:"name,omitempty"`
+ // Type - The type of the extended location. Possible values include: 'ExtendedLocationTypesEdgeZone'
+ Type ExtendedLocationTypes `json:"type,omitempty"`
+}
+
+// FileServiceItems the list of file services returned.
+type FileServiceItems struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; List of file services returned.
+ Value *[]FileServiceProperties `json:"value,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for FileServiceItems.
+func (fsi FileServiceItems) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// FileServiceProperties the properties of File services in storage account.
+type FileServiceProperties struct {
+ autorest.Response `json:"-"`
+ // FileServicePropertiesProperties - The properties of File services in storage account.
+ *FileServicePropertiesProperties `json:"properties,omitempty"`
+ // Sku - READ-ONLY; Sku name and tier.
+ Sku *Sku `json:"sku,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for FileServiceProperties.
+func (fsp FileServiceProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if fsp.FileServicePropertiesProperties != nil {
+ objectMap["properties"] = fsp.FileServicePropertiesProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for FileServiceProperties struct.
+func (fsp *FileServiceProperties) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var fileServiceProperties FileServicePropertiesProperties
+ err = json.Unmarshal(*v, &fileServiceProperties)
+ if err != nil {
+ return err
+ }
+ fsp.FileServicePropertiesProperties = &fileServiceProperties
+ }
+ case "sku":
+ if v != nil {
+ var sku Sku
+ err = json.Unmarshal(*v, &sku)
+ if err != nil {
+ return err
+ }
+ fsp.Sku = &sku
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ fsp.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ fsp.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ fsp.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// FileServicePropertiesProperties the properties of File services in storage account.
+type FileServicePropertiesProperties struct {
+ // Cors - Specifies CORS rules for the File service. You can include up to five CorsRule elements in the request. If no CorsRule elements are included in the request body, all CORS rules will be deleted, and CORS will be disabled for the File service.
+ Cors *CorsRules `json:"cors,omitempty"`
+ // ShareDeleteRetentionPolicy - The file service properties for share soft delete.
+ ShareDeleteRetentionPolicy *DeleteRetentionPolicy `json:"shareDeleteRetentionPolicy,omitempty"`
+ // ProtocolSettings - Protocol settings for the file service.
+ ProtocolSettings *ProtocolSettings `json:"protocolSettings,omitempty"`
+}
+
+// FileShare properties of the file share, including Id, resource name, resource type, Etag.
+type FileShare struct {
+ autorest.Response `json:"-"`
+ // FileShareProperties - Properties of the file share.
+ *FileShareProperties `json:"properties,omitempty"`
+ // Etag - READ-ONLY; Resource Etag.
+ Etag *string `json:"etag,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for FileShare.
+func (fs FileShare) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if fs.FileShareProperties != nil {
+ objectMap["properties"] = fs.FileShareProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for FileShare struct.
+func (fs *FileShare) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var fileShareProperties FileShareProperties
+ err = json.Unmarshal(*v, &fileShareProperties)
+ if err != nil {
+ return err
+ }
+ fs.FileShareProperties = &fileShareProperties
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ fs.Etag = &etag
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ fs.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ fs.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ fs.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// FileShareItem the file share properties to be listed out.
+type FileShareItem struct {
+ // FileShareProperties - The file share properties to be listed out.
+ *FileShareProperties `json:"properties,omitempty"`
+ // Etag - READ-ONLY; Resource Etag.
+ Etag *string `json:"etag,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for FileShareItem.
+func (fsi FileShareItem) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if fsi.FileShareProperties != nil {
+ objectMap["properties"] = fsi.FileShareProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for FileShareItem struct.
+func (fsi *FileShareItem) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var fileShareProperties FileShareProperties
+ err = json.Unmarshal(*v, &fileShareProperties)
+ if err != nil {
+ return err
+ }
+ fsi.FileShareProperties = &fileShareProperties
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ fsi.Etag = &etag
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ fsi.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ fsi.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ fsi.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// FileShareItems response schema. Contains the list of shares returned and, if paging is requested or
+// required, a URL to the next page of shares.
+type FileShareItems struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; List of file shares returned.
+ Value *[]FileShareItem `json:"value,omitempty"`
+ // NextLink - READ-ONLY; Request URL that can be used to query the next page of shares. Returned when the total number of requested shares exceeds the maximum page size.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for FileShareItems.
+func (fsi FileShareItems) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// FileShareItemsIterator provides access to a complete listing of FileShareItem values.
+type FileShareItemsIterator struct {
+ i int
+ page FileShareItemsPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *FileShareItemsIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileShareItemsIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *FileShareItemsIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter FileShareItemsIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter FileShareItemsIterator) Response() FileShareItems {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter FileShareItemsIterator) Value() FileShareItem {
+ if !iter.page.NotDone() {
+ return FileShareItem{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewFileShareItemsIterator creates a new instance of the FileShareItemsIterator type.
+func NewFileShareItemsIterator(page FileShareItemsPage) FileShareItemsIterator {
+ return FileShareItemsIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (fsi FileShareItems) IsEmpty() bool {
+ return fsi.Value == nil || len(*fsi.Value) == 0
+}
+
+// hasNextLink returns true if the NextLink is not empty.
+func (fsi FileShareItems) hasNextLink() bool {
+ return fsi.NextLink != nil && len(*fsi.NextLink) != 0
+}
+
+// fileShareItemsPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (fsi FileShareItems) fileShareItemsPreparer(ctx context.Context) (*http.Request, error) {
+ if !fsi.hasNextLink() {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(fsi.NextLink)))
+}
+
+// FileShareItemsPage contains a page of FileShareItem values.
+type FileShareItemsPage struct {
+ fn func(context.Context, FileShareItems) (FileShareItems, error)
+ fsi FileShareItems
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *FileShareItemsPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileShareItemsPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ for {
+ next, err := page.fn(ctx, page.fsi)
+ if err != nil {
+ return err
+ }
+ page.fsi = next
+ if !next.hasNextLink() || !next.IsEmpty() {
+ break
+ }
+ }
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *FileShareItemsPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page FileShareItemsPage) NotDone() bool {
+ return !page.fsi.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page FileShareItemsPage) Response() FileShareItems {
+ return page.fsi
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page FileShareItemsPage) Values() []FileShareItem {
+ if page.fsi.IsEmpty() {
+ return nil
+ }
+ return *page.fsi.Value
+}
+
+// NewFileShareItemsPage creates a new instance of the FileShareItemsPage type.
+func NewFileShareItemsPage(cur FileShareItems, getNextPage func(context.Context, FileShareItems) (FileShareItems, error)) FileShareItemsPage {
+ return FileShareItemsPage{
+ fn: getNextPage,
+ fsi: cur,
+ }
+}
+
+// FileShareProperties the properties of the file share.
+type FileShareProperties struct {
+ // LastModifiedTime - READ-ONLY; Returns the date and time the share was last modified.
+ LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"`
+ // Metadata - A name-value pair to associate with the share as metadata.
+ Metadata map[string]*string `json:"metadata"`
+ // ShareQuota - The maximum size of the share, in gigabytes. Must be greater than 0, and less than or equal to 5TB (5120). For Large File Shares, the maximum size is 102400.
+ ShareQuota *int32 `json:"shareQuota,omitempty"`
+ // EnabledProtocols - The authentication protocol that is used for the file share. Can only be specified when creating a share. Possible values include: 'EnabledProtocolsSMB', 'EnabledProtocolsNFS'
+ EnabledProtocols EnabledProtocols `json:"enabledProtocols,omitempty"`
+ // RootSquash - The property is for NFS share only. The default is NoRootSquash. Possible values include: 'RootSquashTypeNoRootSquash', 'RootSquashTypeRootSquash', 'RootSquashTypeAllSquash'
+ RootSquash RootSquashType `json:"rootSquash,omitempty"`
+ // Version - READ-ONLY; The version of the share.
+ Version *string `json:"version,omitempty"`
+ // Deleted - READ-ONLY; Indicates whether the share was deleted.
+ Deleted *bool `json:"deleted,omitempty"`
+ // DeletedTime - READ-ONLY; The deleted time if the share was deleted.
+ DeletedTime *date.Time `json:"deletedTime,omitempty"`
+ // RemainingRetentionDays - READ-ONLY; Remaining retention days for share that was soft deleted.
+ RemainingRetentionDays *int32 `json:"remainingRetentionDays,omitempty"`
+ // AccessTier - Access tier for specific share. GpV2 account can choose between TransactionOptimized (default), Hot, and Cool. FileStorage account can choose Premium. Possible values include: 'ShareAccessTierTransactionOptimized', 'ShareAccessTierHot', 'ShareAccessTierCool', 'ShareAccessTierPremium'
+ AccessTier ShareAccessTier `json:"accessTier,omitempty"`
+ // AccessTierChangeTime - READ-ONLY; Indicates the last modification time for share access tier.
+ AccessTierChangeTime *date.Time `json:"accessTierChangeTime,omitempty"`
+ // AccessTierStatus - READ-ONLY; Indicates if there is a pending transition for access tier.
+ AccessTierStatus *string `json:"accessTierStatus,omitempty"`
+ // ShareUsageBytes - READ-ONLY; The approximate size of the data stored on the share. Note that this value may not include all recently created or recently resized files.
+ ShareUsageBytes *int64 `json:"shareUsageBytes,omitempty"`
+ // LeaseStatus - READ-ONLY; The lease status of the share. Possible values include: 'LeaseStatusLocked', 'LeaseStatusUnlocked'
+ LeaseStatus LeaseStatus `json:"leaseStatus,omitempty"`
+ // LeaseState - READ-ONLY; Lease state of the share. Possible values include: 'LeaseStateAvailable', 'LeaseStateLeased', 'LeaseStateExpired', 'LeaseStateBreaking', 'LeaseStateBroken'
+ LeaseState LeaseState `json:"leaseState,omitempty"`
+ // LeaseDuration - READ-ONLY; Specifies whether the lease on a share is of infinite or fixed duration, only when the share is leased. Possible values include: 'LeaseDurationInfinite', 'LeaseDurationFixed'
+ LeaseDuration LeaseDuration `json:"leaseDuration,omitempty"`
+ // SignedIdentifiers - List of stored access policies specified on the share.
+ SignedIdentifiers *[]SignedIdentifier `json:"signedIdentifiers,omitempty"`
+ // SnapshotTime - READ-ONLY; Creation time of share snapshot returned in the response of list shares with expand param "snapshots".
+ SnapshotTime *date.Time `json:"snapshotTime,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for FileShareProperties.
+func (fsp FileShareProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if fsp.Metadata != nil {
+ objectMap["metadata"] = fsp.Metadata
+ }
+ if fsp.ShareQuota != nil {
+ objectMap["shareQuota"] = fsp.ShareQuota
+ }
+ if fsp.EnabledProtocols != "" {
+ objectMap["enabledProtocols"] = fsp.EnabledProtocols
+ }
+ if fsp.RootSquash != "" {
+ objectMap["rootSquash"] = fsp.RootSquash
+ }
+ if fsp.AccessTier != "" {
+ objectMap["accessTier"] = fsp.AccessTier
+ }
+ if fsp.SignedIdentifiers != nil {
+ objectMap["signedIdentifiers"] = fsp.SignedIdentifiers
+ }
+ return json.Marshal(objectMap)
+}
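+
+// Sketch of a caller-side create/update payload (illustrative; the quota and
+// tier are arbitrary examples). READ-ONLY fields such as LastModifiedTime are
+// never set by callers and, as the marshaler above shows, are dropped from
+// request bodies anyway.
+func exampleNewFileSharePayload() FileShare {
+ return FileShare{
+ FileShareProperties: &FileShareProperties{
+ ShareQuota: to.Int32Ptr(100), // GiB; up to 5120, or 102400 for large file shares
+ EnabledProtocols: EnabledProtocolsSMB,
+ AccessTier: ShareAccessTierHot,
+ },
+ }
+}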
+
+// GeoReplicationStats statistics related to replication for storage account's Blob, Table, Queue and File
+// services. It is only available when geo-redundant replication is enabled for the storage account.
+type GeoReplicationStats struct {
+ // Status - READ-ONLY; The status of the secondary location. Possible values are: - Live: Indicates that the secondary location is active and operational. - Bootstrap: Indicates initial synchronization from the primary location to the secondary location is in progress.This typically occurs when replication is first enabled. - Unavailable: Indicates that the secondary location is temporarily unavailable. Possible values include: 'GeoReplicationStatusLive', 'GeoReplicationStatusBootstrap', 'GeoReplicationStatusUnavailable'
+ Status GeoReplicationStatus `json:"status,omitempty"`
+ // LastSyncTime - READ-ONLY; All primary writes preceding this UTC date/time value are guaranteed to be available for read operations. Primary writes following this point in time may or may not be available for reads. Element may be the default value if LastSyncTime is not available; this can happen if the secondary is offline or we are in bootstrap.
+ LastSyncTime *date.Time `json:"lastSyncTime,omitempty"`
+ // CanFailover - READ-ONLY; A boolean flag which indicates whether or not account failover is supported for the account.
+ CanFailover *bool `json:"canFailover,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for GeoReplicationStats.
+func (grs GeoReplicationStats) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// Identity identity for the resource.
+type Identity struct {
+ // PrincipalID - READ-ONLY; The principal ID of resource identity.
+ PrincipalID *string `json:"principalId,omitempty"`
+ // TenantID - READ-ONLY; The tenant ID of resource.
+ TenantID *string `json:"tenantId,omitempty"`
+ // Type - The identity type. Possible values include: 'IdentityTypeNone', 'IdentityTypeSystemAssigned', 'IdentityTypeUserAssigned', 'IdentityTypeSystemAssignedUserAssigned'
+ Type IdentityType `json:"type,omitempty"`
+ // UserAssignedIdentities - Gets or sets a list of key value pairs that describe the set of User Assigned identities that will be used with this storage account. The key is the ARM resource identifier of the identity. Only 1 User Assigned identity is permitted here.
+ UserAssignedIdentities map[string]*UserAssignedIdentity `json:"userAssignedIdentities"`
+}
+
+// MarshalJSON is the custom marshaler for Identity.
+func (i Identity) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if i.Type != "" {
+ objectMap["type"] = i.Type
+ }
+ if i.UserAssignedIdentities != nil {
+ objectMap["userAssignedIdentities"] = i.UserAssignedIdentities
+ }
+ return json.Marshal(objectMap)
+}
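+
+// Illustrative user-assigned identity payload (the ARM resource ID below is
+// a placeholder, not a real identity). Per the field comment, exactly one
+// user-assigned identity is permitted in the map.
+func exampleUserAssignedIdentity() Identity {
+ return Identity{
+ Type: IdentityTypeUserAssigned,
+ UserAssignedIdentities: map[string]*UserAssignedIdentity{
+ "/subscriptions/{subscriptionId}/resourceGroups/{rg}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{name}": {},
+ },
+ }
+}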
+
+// ImmutabilityPolicy the ImmutabilityPolicy property of a blob container, including Id, resource name,
+// resource type, Etag.
+type ImmutabilityPolicy struct {
+ autorest.Response `json:"-"`
+ // ImmutabilityPolicyProperty - The properties of an ImmutabilityPolicy of a blob container.
+ *ImmutabilityPolicyProperty `json:"properties,omitempty"`
+ // Etag - READ-ONLY; Resource Etag.
+ Etag *string `json:"etag,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ImmutabilityPolicy.
+func (IP ImmutabilityPolicy) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if IP.ImmutabilityPolicyProperty != nil {
+ objectMap["properties"] = IP.ImmutabilityPolicyProperty
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ImmutabilityPolicy struct.
+func (IP *ImmutabilityPolicy) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var immutabilityPolicyProperty ImmutabilityPolicyProperty
+ err = json.Unmarshal(*v, &immutabilityPolicyProperty)
+ if err != nil {
+ return err
+ }
+ IP.ImmutabilityPolicyProperty = &immutabilityPolicyProperty
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ IP.Etag = &etag
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ IP.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ IP.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ IP.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// ImmutabilityPolicyProperties the properties of an ImmutabilityPolicy of a blob container.
+type ImmutabilityPolicyProperties struct {
+ // ImmutabilityPolicyProperty - The properties of an ImmutabilityPolicy of a blob container.
+ *ImmutabilityPolicyProperty `json:"properties,omitempty"`
+ // Etag - READ-ONLY; ImmutabilityPolicy Etag.
+ Etag *string `json:"etag,omitempty"`
+ // UpdateHistory - READ-ONLY; The ImmutabilityPolicy update history of the blob container.
+ UpdateHistory *[]UpdateHistoryProperty `json:"updateHistory,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ImmutabilityPolicyProperties.
+func (ipp ImmutabilityPolicyProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if ipp.ImmutabilityPolicyProperty != nil {
+ objectMap["properties"] = ipp.ImmutabilityPolicyProperty
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ImmutabilityPolicyProperties struct.
+func (ipp *ImmutabilityPolicyProperties) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var immutabilityPolicyProperty ImmutabilityPolicyProperty
+ err = json.Unmarshal(*v, &immutabilityPolicyProperty)
+ if err != nil {
+ return err
+ }
+ ipp.ImmutabilityPolicyProperty = &immutabilityPolicyProperty
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ ipp.Etag = &etag
+ }
+ case "updateHistory":
+ if v != nil {
+ var updateHistory []UpdateHistoryProperty
+ err = json.Unmarshal(*v, &updateHistory)
+ if err != nil {
+ return err
+ }
+ ipp.UpdateHistory = &updateHistory
+ }
+ }
+ }
+
+ return nil
+}
+
+// ImmutabilityPolicyProperty the properties of an ImmutabilityPolicy of a blob container.
+type ImmutabilityPolicyProperty struct {
+ // ImmutabilityPeriodSinceCreationInDays - The immutability period for the blobs in the container since the policy creation, in days.
+ ImmutabilityPeriodSinceCreationInDays *int32 `json:"immutabilityPeriodSinceCreationInDays,omitempty"`
+ // State - READ-ONLY; The ImmutabilityPolicy state of a blob container, possible values include: Locked and Unlocked. Possible values include: 'ImmutabilityPolicyStateLocked', 'ImmutabilityPolicyStateUnlocked'
+ State ImmutabilityPolicyState `json:"state,omitempty"`
+ // AllowProtectedAppendWrites - This property can only be changed for unlocked time-based retention policies. When enabled, new blocks can be written to an append blob while maintaining immutability protection and compliance. Only new blocks can be added and any existing blocks cannot be modified or deleted. This property cannot be changed with ExtendImmutabilityPolicy API.
+ AllowProtectedAppendWrites *bool `json:"allowProtectedAppendWrites,omitempty"`
+ // AllowProtectedAppendWritesAll - This property can only be changed for unlocked time-based retention policies. When enabled, new blocks can be written to both 'Append and Block Blobs' while maintaining immutability protection and compliance. Only new blocks can be added and any existing blocks cannot be modified or deleted. This property cannot be changed with ExtendImmutabilityPolicy API. The 'allowProtectedAppendWrites' and 'allowProtectedAppendWritesAll' properties are mutually exclusive.
+ AllowProtectedAppendWritesAll *bool `json:"allowProtectedAppendWritesAll,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ImmutabilityPolicyProperty.
+func (ipp ImmutabilityPolicyProperty) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if ipp.ImmutabilityPeriodSinceCreationInDays != nil {
+ objectMap["immutabilityPeriodSinceCreationInDays"] = ipp.ImmutabilityPeriodSinceCreationInDays
+ }
+ if ipp.AllowProtectedAppendWrites != nil {
+ objectMap["allowProtectedAppendWrites"] = ipp.AllowProtectedAppendWrites
+ }
+ if ipp.AllowProtectedAppendWritesAll != nil {
+ objectMap["allowProtectedAppendWritesAll"] = ipp.AllowProtectedAppendWritesAll
+ }
+ return json.Marshal(objectMap)
+}
+
+// ImmutableStorageAccount this property enables and defines account-level immutability. Enabling the
+// feature auto-enables Blob Versioning.
+type ImmutableStorageAccount struct {
+ // Enabled - A boolean flag which enables account-level immutability. All the containers under such an account have object-level immutability enabled by default.
+ Enabled *bool `json:"enabled,omitempty"`
+ // ImmutabilityPolicy - Specifies the default account-level immutability policy which is inherited and applied to objects that do not possess an explicit immutability policy at the object level. The object-level immutability policy has higher precedence than the container-level immutability policy, which has a higher precedence than the account-level immutability policy.
+ ImmutabilityPolicy *AccountImmutabilityPolicyProperties `json:"immutabilityPolicy,omitempty"`
+}
+
+// ImmutableStorageWithVersioning object level immutability properties of the container.
+type ImmutableStorageWithVersioning struct {
+ // Enabled - This is an immutable property; when set to true it enables object level immutability at the container level.
+ Enabled *bool `json:"enabled,omitempty"`
+ // TimeStamp - READ-ONLY; Returns the date and time the object level immutability was enabled.
+ TimeStamp *date.Time `json:"timeStamp,omitempty"`
+ // MigrationState - READ-ONLY; This property denotes the container level immutability to object level immutability migration state. Possible values include: 'MigrationStateInProgress', 'MigrationStateCompleted'
+ MigrationState MigrationState `json:"migrationState,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ImmutableStorageWithVersioning.
+func (iswv ImmutableStorageWithVersioning) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if iswv.Enabled != nil {
+ objectMap["enabled"] = iswv.Enabled
+ }
+ return json.Marshal(objectMap)
+}
+
+// IPRule IP rule with specific IP or IP range in CIDR format.
+type IPRule struct {
+ // IPAddressOrRange - Specifies the IP or IP range in CIDR format. Only an IPv4 address is allowed.
+ IPAddressOrRange *string `json:"value,omitempty"`
+ // Action - The action of IP ACL rule. Possible values include: 'ActionAllow'
+ Action Action `json:"action,omitempty"`
+}
+
+// KeyCreationTime storage account keys creation time.
+type KeyCreationTime struct {
+ Key1 *date.Time `json:"key1,omitempty"`
+ Key2 *date.Time `json:"key2,omitempty"`
+}
+
+// KeyPolicy keyPolicy assigned to the storage account.
+type KeyPolicy struct {
+ // KeyExpirationPeriodInDays - The key expiration period in days.
+ KeyExpirationPeriodInDays *int32 `json:"keyExpirationPeriodInDays,omitempty"`
+}
+
+// KeyVaultProperties properties of key vault.
+type KeyVaultProperties struct {
+ // KeyName - The name of KeyVault key.
+ KeyName *string `json:"keyname,omitempty"`
+ // KeyVersion - The version of KeyVault key.
+ KeyVersion *string `json:"keyversion,omitempty"`
+ // KeyVaultURI - The Uri of KeyVault.
+ KeyVaultURI *string `json:"keyvaulturi,omitempty"`
+ // CurrentVersionedKeyIdentifier - READ-ONLY; The object identifier of the current versioned Key Vault Key in use.
+ CurrentVersionedKeyIdentifier *string `json:"currentVersionedKeyIdentifier,omitempty"`
+ // LastKeyRotationTimestamp - READ-ONLY; Timestamp of last rotation of the Key Vault Key.
+ LastKeyRotationTimestamp *date.Time `json:"lastKeyRotationTimestamp,omitempty"`
+ // CurrentVersionedKeyExpirationTimestamp - READ-ONLY; This is a read only property that represents the expiration time of the current version of the customer managed key used for encryption.
+ CurrentVersionedKeyExpirationTimestamp *date.Time `json:"currentVersionedKeyExpirationTimestamp,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for KeyVaultProperties.
+func (kvp KeyVaultProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if kvp.KeyName != nil {
+ objectMap["keyname"] = kvp.KeyName
+ }
+ if kvp.KeyVersion != nil {
+ objectMap["keyversion"] = kvp.KeyVersion
+ }
+ if kvp.KeyVaultURI != nil {
+ objectMap["keyvaulturi"] = kvp.KeyVaultURI
+ }
+ return json.Marshal(objectMap)
+}
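+
+// Putting Encryption, EncryptionServices and KeyVaultProperties together: an
+// illustrative customer-managed-key configuration (vault URI and key name are
+// placeholders). KeyVersion is left unset here so the version-tracking fields
+// above stay service-managed; a fixed version could be pinned instead.
+func exampleCustomerManagedKeyEncryption() Encryption {
+ return Encryption{
+ KeySource: KeySourceMicrosoftKeyvault,
+ Services: &EncryptionServices{
+ Blob: &EncryptionService{Enabled: to.BoolPtr(true), KeyType: KeyTypeAccount},
+ },
+ KeyVaultProperties: &KeyVaultProperties{
+ KeyVaultURI: to.StringPtr("https://example-vault.vault.azure.net/"),
+ KeyName: to.StringPtr("example-key"),
+ },
+ }
+}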
+
+// LastAccessTimeTrackingPolicy the blob service properties for Last access time based tracking policy.
+type LastAccessTimeTrackingPolicy struct {
+ // Enable - When set to true last access time based tracking is enabled.
+ Enable *bool `json:"enable,omitempty"`
+ // Name - Name of the policy. The valid value is AccessTimeTracking. This field is currently read only. Possible values include: 'NameAccessTimeTracking'
+ Name Name `json:"name,omitempty"`
+ // TrackingGranularityInDays - The field specifies blob object tracking granularity in days, typically how often the blob object should be tracked. This field is currently read-only with a value of 1.
+ TrackingGranularityInDays *int32 `json:"trackingGranularityInDays,omitempty"`
+ // BlobType - An array of predefined supported blob types. Only blockBlob is the supported value. This field is currently read-only.
+ BlobType *[]string `json:"blobType,omitempty"`
+}
+
+// LeaseContainerRequest lease Container request schema.
+type LeaseContainerRequest struct {
+ // Action - Specifies the lease action. Can be one of the available actions. Possible values include: 'Action1Acquire', 'Action1Renew', 'Action1Change', 'Action1Release', 'Action1Break'
+ Action Action1 `json:"action,omitempty"`
+ // LeaseID - Identifies the lease. Can be specified in any valid GUID string format.
+ LeaseID *string `json:"leaseId,omitempty"`
+ // BreakPeriod - Optional. For a break action, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60.
+ BreakPeriod *int32 `json:"breakPeriod,omitempty"`
+ // LeaseDuration - Required for acquire. Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires.
+ LeaseDuration *int32 `json:"leaseDuration,omitempty"`
+ // ProposedLeaseID - Optional for acquire, required for change. Proposed lease ID, in a GUID string format.
+ ProposedLeaseID *string `json:"proposedLeaseId,omitempty"`
+}
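+
+// Illustrative acquire request (the proposed lease ID is a made-up GUID;
+// Action1Acquire is one of the enum values listed above):
+func exampleAcquireContainerLease() LeaseContainerRequest {
+ return LeaseContainerRequest{
+ Action: Action1Acquire,
+ LeaseDuration: to.Int32Ptr(-1), // -1 requests a lease that never expires
+ ProposedLeaseID: to.StringPtr("11111111-1111-1111-1111-111111111111"),
+ }
+}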
+
+// LeaseContainerResponse lease Container response schema.
+type LeaseContainerResponse struct {
+ autorest.Response `json:"-"`
+ // LeaseID - Returned unique lease ID that must be included with any request to delete the container, or to renew, change, or release the lease.
+ LeaseID *string `json:"leaseId,omitempty"`
+ // LeaseTimeSeconds - Approximate time remaining in the lease period, in seconds.
+ LeaseTimeSeconds *string `json:"leaseTimeSeconds,omitempty"`
+}
+
+// LeaseShareRequest lease Share request schema.
+type LeaseShareRequest struct {
+ // Action - Specifies the lease action. Can be one of the available actions. Possible values include: 'LeaseShareActionAcquire', 'LeaseShareActionRenew', 'LeaseShareActionChange', 'LeaseShareActionRelease', 'LeaseShareActionBreak'
+ Action LeaseShareAction `json:"action,omitempty"`
+ // LeaseID - Identifies the lease. Can be specified in any valid GUID string format.
+ LeaseID *string `json:"leaseId,omitempty"`
+ // BreakPeriod - Optional. For a break action, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60.
+ BreakPeriod *int32 `json:"breakPeriod,omitempty"`
+ // LeaseDuration - Required for acquire. Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires.
+ LeaseDuration *int32 `json:"leaseDuration,omitempty"`
+ // ProposedLeaseID - Optional for acquire, required for change. Proposed lease ID, in a GUID string format.
+ ProposedLeaseID *string `json:"proposedLeaseId,omitempty"`
+}
+
+// LeaseShareResponse lease Share response schema.
+type LeaseShareResponse struct {
+ autorest.Response `json:"-"`
+ // LeaseID - Returned unique lease ID that must be included with any request to delete the share, or to renew, change, or release the lease.
+ LeaseID *string `json:"leaseId,omitempty"`
+ // LeaseTimeSeconds - Approximate time remaining in the lease period, in seconds.
+ LeaseTimeSeconds *string `json:"leaseTimeSeconds,omitempty"`
+}
+
+// LegalHold the LegalHold property of a blob container.
+type LegalHold struct {
+ autorest.Response `json:"-"`
+ // HasLegalHold - READ-ONLY; The hasLegalHold public property is set to true by SRP if there is at least one existing tag. The hasLegalHold public property is set to false by SRP if all existing legal hold tags are cleared out. There can be a maximum of 1000 blob containers with hasLegalHold=true for a given account.
+ HasLegalHold *bool `json:"hasLegalHold,omitempty"`
+ // Tags - Each tag should be 3 to 23 alphanumeric characters and is normalized to lower case at SRP.
+ Tags *[]string `json:"tags,omitempty"`
+ // AllowProtectedAppendWritesAll - When enabled, new blocks can be written to both 'Append and Block Blobs' while maintaining legal hold protection and compliance. Only new blocks can be added and any existing blocks cannot be modified or deleted.
+ AllowProtectedAppendWritesAll *bool `json:"allowProtectedAppendWritesAll,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for LegalHold.
+func (lh LegalHold) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if lh.Tags != nil {
+ objectMap["tags"] = lh.Tags
+ }
+ if lh.AllowProtectedAppendWritesAll != nil {
+ objectMap["allowProtectedAppendWritesAll"] = lh.AllowProtectedAppendWritesAll
+ }
+ return json.Marshal(objectMap)
+}
+
+// LegalHoldProperties the LegalHold property of a blob container.
+type LegalHoldProperties struct {
+ // HasLegalHold - READ-ONLY; The hasLegalHold public property is set to true by SRP if there is at least one existing tag. The hasLegalHold public property is set to false by SRP if all existing legal hold tags are cleared out. There can be a maximum of 1000 blob containers with hasLegalHold=true for a given account.
+ HasLegalHold *bool `json:"hasLegalHold,omitempty"`
+ // Tags - The list of LegalHold tags of a blob container.
+ Tags *[]TagProperty `json:"tags,omitempty"`
+ // ProtectedAppendWritesHistory - Protected append blob writes history.
+ ProtectedAppendWritesHistory *ProtectedAppendWritesHistory `json:"protectedAppendWritesHistory,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for LegalHoldProperties.
+func (lhp LegalHoldProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if lhp.Tags != nil {
+ objectMap["tags"] = lhp.Tags
+ }
+ if lhp.ProtectedAppendWritesHistory != nil {
+ objectMap["protectedAppendWritesHistory"] = lhp.ProtectedAppendWritesHistory
+ }
+ return json.Marshal(objectMap)
+}
+
+// ListAccountSasResponse the List SAS credentials operation response.
+type ListAccountSasResponse struct {
+ autorest.Response `json:"-"`
+ // AccountSasToken - READ-ONLY; List SAS credentials of storage account.
+ AccountSasToken *string `json:"accountSasToken,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ListAccountSasResponse.
+func (lasr ListAccountSasResponse) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// ListBlobInventoryPolicy list of blob inventory policies returned.
+type ListBlobInventoryPolicy struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; List of blob inventory policies.
+ Value *[]BlobInventoryPolicy `json:"value,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ListBlobInventoryPolicy.
+func (lbip ListBlobInventoryPolicy) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// ListContainerItem the blob container properties to be listed out.
+type ListContainerItem struct {
+ // ContainerProperties - The blob container properties to be listed out.
+ *ContainerProperties `json:"properties,omitempty"`
+ // Etag - READ-ONLY; Resource Etag.
+ Etag *string `json:"etag,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ListContainerItem.
+func (lci ListContainerItem) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if lci.ContainerProperties != nil {
+ objectMap["properties"] = lci.ContainerProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ListContainerItem struct.
+func (lci *ListContainerItem) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var containerProperties ContainerProperties
+ err = json.Unmarshal(*v, &containerProperties)
+ if err != nil {
+ return err
+ }
+ lci.ContainerProperties = &containerProperties
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ lci.Etag = &etag
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ lci.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ lci.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ lci.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
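+
+// A minimal sketch of the round trip the marshaler pair above handles: the
+// embedded ContainerProperties is flattened out of the nested "properties"
+// object on the way in, and the READ-ONLY identity fields are dropped on the
+// way out:
+//
+//   var item ListContainerItem
+//   _ = json.Unmarshal([]byte(`{"name":"logs","properties":{}}`), &item)
+//   body, _ := json.Marshal(item) // re-emits only {"properties":{...}}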
+
+// ListContainerItems response schema. Contains the list of blob containers returned, and if paging is
+// requested or required, a URL to the next page of containers.
+type ListContainerItems struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; List of blob containers returned.
+ Value *[]ListContainerItem `json:"value,omitempty"`
+ // NextLink - READ-ONLY; Request URL that can be used to query the next page of containers. Returned when the total number of requested containers exceeds the maximum page size.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ListContainerItems.
+func (lci ListContainerItems) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// ListContainerItemsIterator provides access to a complete listing of ListContainerItem values.
+type ListContainerItemsIterator struct {
+ i int
+ page ListContainerItemsPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ListContainerItemsIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ListContainerItemsIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ListContainerItemsIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ListContainerItemsIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ListContainerItemsIterator) Response() ListContainerItems {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ListContainerItemsIterator) Value() ListContainerItem {
+ if !iter.page.NotDone() {
+ return ListContainerItem{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewListContainerItemsIterator creates a new instance of the ListContainerItemsIterator type.
+func NewListContainerItemsIterator(page ListContainerItemsPage) ListContainerItemsIterator {
+ return ListContainerItemsIterator{page: page}
+}
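+
+// A minimal sketch of draining the iterator above; firstPage is assumed to
+// come from a client List call that returns a ListContainerItemsPage:
+//
+//   iter := NewListContainerItemsIterator(firstPage)
+//   for iter.NotDone() {
+//       item := iter.Value()
+//       fmt.Println(to.String(item.Name))
+//       if err := iter.NextWithContext(ctx); err != nil {
+//           break
+//       }
+//   }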
+
+// IsEmpty returns true if the ListContainerItems contains no values.
+func (lci ListContainerItems) IsEmpty() bool {
+ return lci.Value == nil || len(*lci.Value) == 0
+}
+
+// hasNextLink returns true if the NextLink is not empty.
+func (lci ListContainerItems) hasNextLink() bool {
+ return lci.NextLink != nil && len(*lci.NextLink) != 0
+}
+
+// listContainerItemsPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (lci ListContainerItems) listContainerItemsPreparer(ctx context.Context) (*http.Request, error) {
+ if !lci.hasNextLink() {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(lci.NextLink)))
+}
+
+// ListContainerItemsPage contains a page of ListContainerItem values.
+type ListContainerItemsPage struct {
+ fn func(context.Context, ListContainerItems) (ListContainerItems, error)
+ lci ListContainerItems
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ListContainerItemsPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ListContainerItemsPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ for {
+ next, err := page.fn(ctx, page.lci)
+ if err != nil {
+ return err
+ }
+ page.lci = next
+ if !next.hasNextLink() || !next.IsEmpty() {
+ break
+ }
+ }
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ListContainerItemsPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ListContainerItemsPage) NotDone() bool {
+ return !page.lci.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ListContainerItemsPage) Response() ListContainerItems {
+ return page.lci
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ListContainerItemsPage) Values() []ListContainerItem {
+ if page.lci.IsEmpty() {
+ return nil
+ }
+ return *page.lci.Value
+}
+
+// NewListContainerItemsPage creates a new instance of the ListContainerItemsPage type.
+func NewListContainerItemsPage(cur ListContainerItems, getNextPage func(context.Context, ListContainerItems) (ListContainerItems, error)) ListContainerItemsPage {
+ return ListContainerItemsPage{
+ fn: getNextPage,
+ lci: cur,
+ }
+}
+
+// ListQueue a queue resource returned when listing queues.
+type ListQueue struct {
+ // ListQueueProperties - List Queue resource properties.
+ *ListQueueProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ListQueue.
+func (lq ListQueue) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if lq.ListQueueProperties != nil {
+ objectMap["properties"] = lq.ListQueueProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ListQueue struct.
+func (lq *ListQueue) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var queueProperties ListQueueProperties
+ err = json.Unmarshal(*v, &queueProperties)
+ if err != nil {
+ return err
+ }
+ lq.ListQueueProperties = &queueProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ lq.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ lq.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ lq.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// ListQueueProperties list Queue resource properties.
+type ListQueueProperties struct {
+ // Metadata - A name-value pair that represents queue metadata.
+ Metadata map[string]*string `json:"metadata"`
+}
+
+// MarshalJSON is the custom marshaler for ListQueueProperties.
+func (lqp ListQueueProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if lqp.Metadata != nil {
+ objectMap["metadata"] = lqp.Metadata
+ }
+ return json.Marshal(objectMap)
+}
+
+// ListQueueResource response schema. Contains the list of queues returned.
+type ListQueueResource struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; List of queues returned.
+ Value *[]ListQueue `json:"value,omitempty"`
+ // NextLink - READ-ONLY; Request URL that can be used to list the next page of queues.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ListQueueResource.
+func (lqr ListQueueResource) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// ListQueueResourceIterator provides access to a complete listing of ListQueue values.
+type ListQueueResourceIterator struct {
+ i int
+ page ListQueueResourcePage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ListQueueResourceIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ListQueueResourceIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ListQueueResourceIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ListQueueResourceIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ListQueueResourceIterator) Response() ListQueueResource {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ListQueueResourceIterator) Value() ListQueue {
+ if !iter.page.NotDone() {
+ return ListQueue{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewListQueueResourceIterator creates a new instance of the ListQueueResourceIterator type.
+func NewListQueueResourceIterator(page ListQueueResourcePage) ListQueueResourceIterator {
+ return ListQueueResourceIterator{page: page}
+}
+
+// IsEmpty returns true if the ListQueueResource contains no values.
+func (lqr ListQueueResource) IsEmpty() bool {
+ return lqr.Value == nil || len(*lqr.Value) == 0
+}
+
+// hasNextLink returns true if the NextLink is not empty.
+func (lqr ListQueueResource) hasNextLink() bool {
+ return lqr.NextLink != nil && len(*lqr.NextLink) != 0
+}
+
+// listQueueResourcePreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (lqr ListQueueResource) listQueueResourcePreparer(ctx context.Context) (*http.Request, error) {
+ if !lqr.hasNextLink() {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(lqr.NextLink)))
+}
+
+// ListQueueResourcePage contains a page of ListQueue values.
+type ListQueueResourcePage struct {
+ fn func(context.Context, ListQueueResource) (ListQueueResource, error)
+ lqr ListQueueResource
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ListQueueResourcePage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ListQueueResourcePage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ for {
+ next, err := page.fn(ctx, page.lqr)
+ if err != nil {
+ return err
+ }
+ page.lqr = next
+ if !next.hasNextLink() || !next.IsEmpty() {
+ break
+ }
+ }
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ListQueueResourcePage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ListQueueResourcePage) NotDone() bool {
+ return !page.lqr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ListQueueResourcePage) Response() ListQueueResource {
+ return page.lqr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ListQueueResourcePage) Values() []ListQueue {
+ if page.lqr.IsEmpty() {
+ return nil
+ }
+ return *page.lqr.Value
+}
+
+// NewListQueueResourcePage creates a new instance of the ListQueueResourcePage type.
+func NewListQueueResourcePage(cur ListQueueResource, getNextPage func(context.Context, ListQueueResource) (ListQueueResource, error)) ListQueueResourcePage {
+ return ListQueueResourcePage{
+ fn: getNextPage,
+ lqr: cur,
+ }
+}
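+
+// A minimal sketch of walking pages directly instead of through the
+// iterator; firstPage is assumed to come from a queue client List call:
+//
+//   page := firstPage
+//   for page.NotDone() {
+//       for _, q := range page.Values() {
+//           fmt.Println(to.String(q.Name))
+//       }
+//       if err := page.NextWithContext(ctx); err != nil {
+//           break
+//       }
+//   }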
+
+// ListQueueServices the list of queue services returned.
+type ListQueueServices struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; List of queue services returned.
+ Value *[]QueueServiceProperties `json:"value,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ListQueueServices.
+func (lqs ListQueueServices) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// ListServiceSasResponse the List service SAS credentials operation response.
+type ListServiceSasResponse struct {
+ autorest.Response `json:"-"`
+ // ServiceSasToken - READ-ONLY; List service SAS credentials of specific resource.
+ ServiceSasToken *string `json:"serviceSasToken,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ListServiceSasResponse.
+func (lssr ListServiceSasResponse) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// ListTableResource response schema. Contains the list of tables returned.
+type ListTableResource struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; List of tables returned.
+ Value *[]Table `json:"value,omitempty"`
+ // NextLink - READ-ONLY; Request URL that can be used to query the next page of tables.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ListTableResource.
+func (ltr ListTableResource) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// ListTableResourceIterator provides access to a complete listing of Table values.
+type ListTableResourceIterator struct {
+ i int
+ page ListTableResourcePage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ListTableResourceIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ListTableResourceIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ListTableResourceIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ListTableResourceIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ListTableResourceIterator) Response() ListTableResource {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ListTableResourceIterator) Value() Table {
+ if !iter.page.NotDone() {
+ return Table{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewListTableResourceIterator creates a new instance of the ListTableResourceIterator type.
+func NewListTableResourceIterator(page ListTableResourcePage) ListTableResourceIterator {
+ return ListTableResourceIterator{page: page}
+}
+
+// IsEmpty returns true if the ListTableResource contains no values.
+func (ltr ListTableResource) IsEmpty() bool {
+ return ltr.Value == nil || len(*ltr.Value) == 0
+}
+
+// hasNextLink returns true if the NextLink is not empty.
+func (ltr ListTableResource) hasNextLink() bool {
+ return ltr.NextLink != nil && len(*ltr.NextLink) != 0
+}
+
+// listTableResourcePreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (ltr ListTableResource) listTableResourcePreparer(ctx context.Context) (*http.Request, error) {
+ if !ltr.hasNextLink() {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(ltr.NextLink)))
+}
+
+// ListTableResourcePage contains a page of Table values.
+type ListTableResourcePage struct {
+ fn func(context.Context, ListTableResource) (ListTableResource, error)
+ ltr ListTableResource
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ListTableResourcePage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ListTableResourcePage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ for {
+ next, err := page.fn(ctx, page.ltr)
+ if err != nil {
+ return err
+ }
+ page.ltr = next
+ if !next.hasNextLink() || !next.IsEmpty() {
+ break
+ }
+ }
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ListTableResourcePage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ListTableResourcePage) NotDone() bool {
+ return !page.ltr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ListTableResourcePage) Response() ListTableResource {
+ return page.ltr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ListTableResourcePage) Values() []Table {
+ if page.ltr.IsEmpty() {
+ return nil
+ }
+ return *page.ltr.Value
+}
+
+// NewListTableResourcePage creates a new instance of the ListTableResourcePage type.
+func NewListTableResourcePage(cur ListTableResource, getNextPage func(context.Context, ListTableResource) (ListTableResource, error)) ListTableResourcePage {
+ return ListTableResourcePage{
+ fn: getNextPage,
+ ltr: cur,
+ }
+}
+
+// ListTableServices the list of table services returned.
+type ListTableServices struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; List of table services returned.
+ Value *[]TableServiceProperties `json:"value,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ListTableServices.
+func (lts ListTableServices) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// LocalUser the local user associated with the storage account.
+type LocalUser struct {
+ autorest.Response `json:"-"`
+ // LocalUserProperties - Storage account local user properties.
+ *LocalUserProperties `json:"properties,omitempty"`
+ SystemData *SystemData `json:"systemData,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for LocalUser.
+func (lu LocalUser) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if lu.LocalUserProperties != nil {
+ objectMap["properties"] = lu.LocalUserProperties
+ }
+ if lu.SystemData != nil {
+ objectMap["systemData"] = lu.SystemData
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for LocalUser struct.
+func (lu *LocalUser) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var localUserProperties LocalUserProperties
+ err = json.Unmarshal(*v, &localUserProperties)
+ if err != nil {
+ return err
+ }
+ lu.LocalUserProperties = &localUserProperties
+ }
+ case "systemData":
+ if v != nil {
+ var systemData SystemData
+ err = json.Unmarshal(*v, &systemData)
+ if err != nil {
+ return err
+ }
+ lu.SystemData = &systemData
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ lu.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ lu.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ lu.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// LocalUserKeys the Storage Account Local User keys.
+type LocalUserKeys struct {
+ autorest.Response `json:"-"`
+ SSHAuthorizedKeys *[]SSHPublicKey `json:"sshAuthorizedKeys,omitempty"`
+ SharedKey *string `json:"sharedKey,omitempty"`
+}
+
+// LocalUserProperties the Storage Account Local User properties.
+type LocalUserProperties struct {
+ // PermissionScopes - The permission scopes of the local user.
+ PermissionScopes *[]PermissionScope `json:"permissionScopes,omitempty"`
+ // HomeDirectory - Optional, local user home directory.
+ HomeDirectory *string `json:"homeDirectory,omitempty"`
+ SSHAuthorizedKeys *[]SSHPublicKey `json:"sshAuthorizedKeys,omitempty"`
+ // Sid - READ-ONLY; A unique Security Identifier that is generated by the server.
+ Sid *string `json:"sid,omitempty"`
+ // HasSharedKey - Indicates whether shared key exists. Set it to false to remove existing shared key.
+ HasSharedKey *bool `json:"hasSharedKey,omitempty"`
+ // HasSSHKey - Indicates whether ssh key exists. Set it to false to remove existing SSH key.
+ HasSSHKey *bool `json:"hasSshKey,omitempty"`
+ // HasSSHPassword - Indicates whether ssh password exists. Set it to false to remove existing SSH password.
+ HasSSHPassword *bool `json:"hasSshPassword,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for LocalUserProperties.
+func (lup LocalUserProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if lup.PermissionScopes != nil {
+ objectMap["permissionScopes"] = lup.PermissionScopes
+ }
+ if lup.HomeDirectory != nil {
+ objectMap["homeDirectory"] = lup.HomeDirectory
+ }
+ if lup.SSHAuthorizedKeys != nil {
+ objectMap["sshAuthorizedKeys"] = lup.SSHAuthorizedKeys
+ }
+ if lup.HasSharedKey != nil {
+ objectMap["hasSharedKey"] = lup.HasSharedKey
+ }
+ if lup.HasSSHKey != nil {
+ objectMap["hasSshKey"] = lup.HasSSHKey
+ }
+ if lup.HasSSHPassword != nil {
+ objectMap["hasSshPassword"] = lup.HasSSHPassword
+ }
+ return json.Marshal(objectMap)
+}
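+
+// A minimal sketch of a local user limited to read/write/list on one blob
+// container with SSH key authentication; the SSHPublicKey field name and the
+// key material are assumptions for illustration:
+//
+//   props := LocalUserProperties{
+//       PermissionScopes: &[]PermissionScope{{
+//           Permissions:  to.StringPtr("rwl"),
+//           Service:      to.StringPtr("blob"),
+//           ResourceName: to.StringPtr("mycontainer"),
+//       }},
+//       HasSSHKey:         to.BoolPtr(true),
+//       SSHAuthorizedKeys: &[]SSHPublicKey{{Key: to.StringPtr("ssh-rsa AAAA...")}},
+//   }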
+
+// LocalUserRegeneratePasswordResult the secrets of Storage Account Local User.
+type LocalUserRegeneratePasswordResult struct {
+ autorest.Response `json:"-"`
+ // SSHPassword - READ-ONLY; Auto-generated password by the server for SSH authentication if hasSshPassword is set to true on creation of the local user.
+ SSHPassword *string `json:"sshPassword,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for LocalUserRegeneratePasswordResult.
+func (lurpr LocalUserRegeneratePasswordResult) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// LocalUsers list storage account local users.
+type LocalUsers struct {
+ autorest.Response `json:"-"`
+ // Value - The local users associated with the storage account.
+ Value *[]LocalUser `json:"value,omitempty"`
+}
+
+// ManagementPolicy the Get Storage Account ManagementPolicies operation response.
+type ManagementPolicy struct {
+ autorest.Response `json:"-"`
+ // ManagementPolicyProperties - Returns the Storage Account Data Policies Rules.
+ *ManagementPolicyProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ManagementPolicy.
+func (mp ManagementPolicy) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if mp.ManagementPolicyProperties != nil {
+ objectMap["properties"] = mp.ManagementPolicyProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ManagementPolicy struct.
+func (mp *ManagementPolicy) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var managementPolicyProperties ManagementPolicyProperties
+ err = json.Unmarshal(*v, &managementPolicyProperties)
+ if err != nil {
+ return err
+ }
+ mp.ManagementPolicyProperties = &managementPolicyProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ mp.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ mp.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ mp.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// ManagementPolicyAction actions are applied to the filtered blobs when the execution condition is met.
+type ManagementPolicyAction struct {
+ // BaseBlob - The management policy action for base blob
+ BaseBlob *ManagementPolicyBaseBlob `json:"baseBlob,omitempty"`
+ // Snapshot - The management policy action for snapshot
+ Snapshot *ManagementPolicySnapShot `json:"snapshot,omitempty"`
+ // Version - The management policy action for version
+ Version *ManagementPolicyVersion `json:"version,omitempty"`
+}
+
+// ManagementPolicyBaseBlob management policy action for base blob.
+type ManagementPolicyBaseBlob struct {
+ // TierToCool - The function to tier blobs to cool storage. Supports blobs currently at the Hot tier.
+ TierToCool *DateAfterModification `json:"tierToCool,omitempty"`
+ // TierToArchive - The function to tier blobs to archive storage. Supports blobs currently at the Hot or Cool tier.
+ TierToArchive *DateAfterModification `json:"tierToArchive,omitempty"`
+ // Delete - The function to delete the blob
+ Delete *DateAfterModification `json:"delete,omitempty"`
+ // EnableAutoTierToHotFromCool - This property enables auto tiering of a blob from cool to hot on a blob access. This property requires tierToCool.daysAfterLastAccessTimeGreaterThan.
+ EnableAutoTierToHotFromCool *bool `json:"enableAutoTierToHotFromCool,omitempty"`
+}
+
+// ManagementPolicyDefinition an object that defines the Lifecycle rule. Each definition is made up of a
+// filter set and an action set.
+type ManagementPolicyDefinition struct {
+ // Actions - An object that defines the action set.
+ Actions *ManagementPolicyAction `json:"actions,omitempty"`
+ // Filters - An object that defines the filter set.
+ Filters *ManagementPolicyFilter `json:"filters,omitempty"`
+}
+
+// ManagementPolicyFilter filters limit rule actions to a subset of blobs within the storage account. If
+// multiple filters are defined, a logical AND is performed on all filters.
+type ManagementPolicyFilter struct {
+ // PrefixMatch - An array of strings for prefixes to be matched.
+ PrefixMatch *[]string `json:"prefixMatch,omitempty"`
+ // BlobTypes - An array of predefined enum values. Currently blockBlob supports all tiering and delete actions. Only delete actions are supported for appendBlob.
+ BlobTypes *[]string `json:"blobTypes,omitempty"`
+ // BlobIndexMatch - An array of blob index tag based filters; there can be at most 10 tag filters.
+ BlobIndexMatch *[]TagFilter `json:"blobIndexMatch,omitempty"`
+}
+
+// ManagementPolicyProperties the Storage Account ManagementPolicy properties.
+type ManagementPolicyProperties struct {
+ // LastModifiedTime - READ-ONLY; Returns the date and time the ManagementPolicies was last modified.
+ LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"`
+ // Policy - The Storage Account ManagementPolicy, in JSON format. See more details in: https://docs.microsoft.com/en-us/azure/storage/common/storage-lifecycle-managment-concepts.
+ Policy *ManagementPolicySchema `json:"policy,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ManagementPolicyProperties.
+func (mpp ManagementPolicyProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if mpp.Policy != nil {
+ objectMap["policy"] = mpp.Policy
+ }
+ return json.Marshal(objectMap)
+}
+
+// ManagementPolicyRule an object that wraps the Lifecycle rule. Each rule is uniquely defined by name.
+type ManagementPolicyRule struct {
+ // Enabled - Rule is enabled if set to true.
+ Enabled *bool `json:"enabled,omitempty"`
+ // Name - A rule name can contain any combination of alphanumeric characters. Rule name is case-sensitive. It must be unique within a policy.
+ Name *string `json:"name,omitempty"`
+ // Type - The valid value is Lifecycle
+ Type *string `json:"type,omitempty"`
+ // Definition - An object that defines the Lifecycle rule.
+ Definition *ManagementPolicyDefinition `json:"definition,omitempty"`
+}
+
+// ManagementPolicySchema the Storage Account ManagementPolicies Rules. See more details in:
+// https://docs.microsoft.com/en-us/azure/storage/common/storage-lifecycle-managment-concepts.
+type ManagementPolicySchema struct {
+ // Rules - The Storage Account ManagementPolicies Rules. See more details in: https://docs.microsoft.com/en-us/azure/storage/common/storage-lifecycle-managment-concepts.
+ Rules *[]ManagementPolicyRule `json:"rules,omitempty"`
+}
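+
+// A minimal sketch of a policy with one Lifecycle rule that tiers block
+// blobs to cool storage 30 days after modification; DateAfterModification is
+// defined elsewhere in this file and its field name here is an assumption:
+//
+//   policy := ManagementPolicySchema{
+//       Rules: &[]ManagementPolicyRule{{
+//           Enabled: to.BoolPtr(true),
+//           Name:    to.StringPtr("tier-to-cool"),
+//           Type:    to.StringPtr("Lifecycle"),
+//           Definition: &ManagementPolicyDefinition{
+//               Actions: &ManagementPolicyAction{
+//                   BaseBlob: &ManagementPolicyBaseBlob{
+//                       TierToCool: &DateAfterModification{DaysAfterModificationGreaterThan: to.Float64Ptr(30)},
+//                   },
+//               },
+//               Filters: &ManagementPolicyFilter{BlobTypes: &[]string{"blockBlob"}},
+//           },
+//       }},
+//   }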
+
+// ManagementPolicySnapShot management policy action for snapshot.
+type ManagementPolicySnapShot struct {
+ // TierToCool - The function to tier a blob snapshot to cool storage. Supports blob snapshots currently at the Hot tier.
+ TierToCool *DateAfterCreation `json:"tierToCool,omitempty"`
+ // TierToArchive - The function to tier a blob snapshot to archive storage. Supports blob snapshots currently at the Hot or Cool tier.
+ TierToArchive *DateAfterCreation `json:"tierToArchive,omitempty"`
+ // Delete - The function to delete the blob snapshot
+ Delete *DateAfterCreation `json:"delete,omitempty"`
+}
+
+// ManagementPolicyVersion management policy action for blob version.
+type ManagementPolicyVersion struct {
+ // TierToCool - The function to tier a blob version to cool storage. Supports blob versions currently at the Hot tier.
+ TierToCool *DateAfterCreation `json:"tierToCool,omitempty"`
+ // TierToArchive - The function to tier a blob version to archive storage. Supports blob versions currently at the Hot or Cool tier.
+ TierToArchive *DateAfterCreation `json:"tierToArchive,omitempty"`
+ // Delete - The function to delete the blob version
+ Delete *DateAfterCreation `json:"delete,omitempty"`
+}
+
+// MetricSpecification metric specification of operation.
+type MetricSpecification struct {
+ // Name - Name of metric specification.
+ Name *string `json:"name,omitempty"`
+ // DisplayName - Display name of metric specification.
+ DisplayName *string `json:"displayName,omitempty"`
+ // DisplayDescription - Display description of metric specification.
+ DisplayDescription *string `json:"displayDescription,omitempty"`
+ // Unit - Unit could be Bytes or Count.
+ Unit *string `json:"unit,omitempty"`
+ // Dimensions - Dimensions of blobs, including blob type and access tier.
+ Dimensions *[]Dimension `json:"dimensions,omitempty"`
+ // AggregationType - Aggregation type could be Average.
+ AggregationType *string `json:"aggregationType,omitempty"`
+ // FillGapWithZero - The property that decides whether to fill gaps with zero.
+ FillGapWithZero *bool `json:"fillGapWithZero,omitempty"`
+ // Category - The category this metric specification belongs to, could be Capacity.
+ Category *string `json:"category,omitempty"`
+ // ResourceIDDimensionNameOverride - Account Resource Id.
+ ResourceIDDimensionNameOverride *string `json:"resourceIdDimensionNameOverride,omitempty"`
+}
+
+// Multichannel multichannel setting. Applies to Premium FileStorage only.
+type Multichannel struct {
+ // Enabled - Indicates whether multichannel is enabled
+ Enabled *bool `json:"enabled,omitempty"`
+}
+
+// NetworkRuleSet network rule set
+type NetworkRuleSet struct {
+ // Bypass - Specifies whether traffic is bypassed for Logging/Metrics/AzureServices. Possible values are any combination of Logging|Metrics|AzureServices (For example, "Logging, Metrics"), or None to bypass none of that traffic. Possible values include: 'BypassNone', 'BypassLogging', 'BypassMetrics', 'BypassAzureServices'
+ Bypass Bypass `json:"bypass,omitempty"`
+ // ResourceAccessRules - Sets the resource access rules
+ ResourceAccessRules *[]ResourceAccessRule `json:"resourceAccessRules,omitempty"`
+ // VirtualNetworkRules - Sets the virtual network rules
+ VirtualNetworkRules *[]VirtualNetworkRule `json:"virtualNetworkRules,omitempty"`
+ // IPRules - Sets the IP ACL rules
+ IPRules *[]IPRule `json:"ipRules,omitempty"`
+ // DefaultAction - Specifies the default action of allow or deny when no other rules match. Possible values include: 'DefaultActionAllow', 'DefaultActionDeny'
+ DefaultAction DefaultAction `json:"defaultAction,omitempty"`
+}
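+
+// A minimal sketch of a default-deny rule set that still admits trusted
+// Azure services and one address range; the IPRule field name is an
+// assumption, as that type is defined elsewhere in this file:
+//
+//   rules := NetworkRuleSet{
+//       Bypass:        BypassAzureServices,
+//       DefaultAction: DefaultActionDeny,
+//       IPRules:       &[]IPRule{{IPAddressOrRange: to.StringPtr("203.0.113.0/24")}},
+//   }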
+
+// ObjectReplicationPolicies list storage account object replication policies.
+type ObjectReplicationPolicies struct {
+ autorest.Response `json:"-"`
+ // Value - The replication policy between two storage accounts.
+ Value *[]ObjectReplicationPolicy `json:"value,omitempty"`
+}
+
+// ObjectReplicationPolicy the replication policy between two storage accounts. Multiple rules can be
+// defined in one policy.
+type ObjectReplicationPolicy struct {
+ autorest.Response `json:"-"`
+ // ObjectReplicationPolicyProperties - Returns the Storage Account Object Replication Policy.
+ *ObjectReplicationPolicyProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ObjectReplicationPolicy.
+func (orp ObjectReplicationPolicy) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if orp.ObjectReplicationPolicyProperties != nil {
+ objectMap["properties"] = orp.ObjectReplicationPolicyProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ObjectReplicationPolicy struct.
+func (orp *ObjectReplicationPolicy) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var objectReplicationPolicyProperties ObjectReplicationPolicyProperties
+ err = json.Unmarshal(*v, &objectReplicationPolicyProperties)
+ if err != nil {
+ return err
+ }
+ orp.ObjectReplicationPolicyProperties = &objectReplicationPolicyProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ orp.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ orp.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ orp.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// ObjectReplicationPolicyFilter filters limit replication to a subset of blobs within the storage account.
+// A logical OR is performed on values in the filter. If multiple filters are defined, a logical AND is
+// performed on all filters.
+type ObjectReplicationPolicyFilter struct {
+ // PrefixMatch - Optional. Filters the results to replicate only blobs whose names begin with the specified prefix.
+ PrefixMatch *[]string `json:"prefixMatch,omitempty"`
+ // MinCreationTime - Blobs created after the time will be replicated to the destination. It must be in datetime format 'yyyy-MM-ddTHH:mm:ssZ'. Example: 2020-02-19T16:05:00Z
+ MinCreationTime *string `json:"minCreationTime,omitempty"`
+}
+
+// ObjectReplicationPolicyProperties the Storage Account ObjectReplicationPolicy properties.
+type ObjectReplicationPolicyProperties struct {
+ // PolicyID - READ-ONLY; A unique id for object replication policy.
+ PolicyID *string `json:"policyId,omitempty"`
+ // EnabledTime - READ-ONLY; Indicates when the policy is enabled on the source account.
+ EnabledTime *date.Time `json:"enabledTime,omitempty"`
+ // SourceAccount - Required. Source account name. It should be the full resource id if allowCrossTenantReplication is set to false.
+ SourceAccount *string `json:"sourceAccount,omitempty"`
+ // DestinationAccount - Required. Destination account name. It should be the full resource id if allowCrossTenantReplication is set to false.
+ DestinationAccount *string `json:"destinationAccount,omitempty"`
+ // Rules - The storage account object replication rules.
+ Rules *[]ObjectReplicationPolicyRule `json:"rules,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ObjectReplicationPolicyProperties.
+func (orpp ObjectReplicationPolicyProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if orpp.SourceAccount != nil {
+ objectMap["sourceAccount"] = orpp.SourceAccount
+ }
+ if orpp.DestinationAccount != nil {
+ objectMap["destinationAccount"] = orpp.DestinationAccount
+ }
+ if orpp.Rules != nil {
+ objectMap["rules"] = orpp.Rules
+ }
+ return json.Marshal(objectMap)
+}
+
+// ObjectReplicationPolicyRule the replication policy rule between two containers.
+type ObjectReplicationPolicyRule struct {
+ // RuleID - Rule Id is auto-generated for each new rule on the destination account. It is required when putting the policy on the source account.
+ RuleID *string `json:"ruleId,omitempty"`
+ // SourceContainer - Required. Source container name.
+ SourceContainer *string `json:"sourceContainer,omitempty"`
+ // DestinationContainer - Required. Destination container name.
+ DestinationContainer *string `json:"destinationContainer,omitempty"`
+ // Filters - Optional. An object that defines the filter set.
+ Filters *ObjectReplicationPolicyFilter `json:"filters,omitempty"`
+}
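+
+// A minimal sketch of a replication rule that copies blobs under one prefix
+// between two containers; RuleID is left unset because it is auto-generated
+// on the destination account:
+//
+//   rule := ObjectReplicationPolicyRule{
+//       SourceContainer:      to.StringPtr("src"),
+//       DestinationContainer: to.StringPtr("dst"),
+//       Filters:              &ObjectReplicationPolicyFilter{PrefixMatch: &[]string{"logs/"}},
+//   }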
+
+// Operation storage REST API operation definition.
+type Operation struct {
+ // Name - Operation name: {provider}/{resource}/{operation}
+ Name *string `json:"name,omitempty"`
+ // Display - Display metadata associated with the operation.
+ Display *OperationDisplay `json:"display,omitempty"`
+ // Origin - The origin of operations.
+ Origin *string `json:"origin,omitempty"`
+ // OperationProperties - Properties of the operation, including metric specifications.
+ *OperationProperties `json:"properties,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for Operation.
+func (o Operation) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if o.Name != nil {
+ objectMap["name"] = o.Name
+ }
+ if o.Display != nil {
+ objectMap["display"] = o.Display
+ }
+ if o.Origin != nil {
+ objectMap["origin"] = o.Origin
+ }
+ if o.OperationProperties != nil {
+ objectMap["properties"] = o.OperationProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for Operation struct.
+func (o *Operation) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ o.Name = &name
+ }
+ case "display":
+ if v != nil {
+ var display OperationDisplay
+ err = json.Unmarshal(*v, &display)
+ if err != nil {
+ return err
+ }
+ o.Display = &display
+ }
+ case "origin":
+ if v != nil {
+ var origin string
+ err = json.Unmarshal(*v, &origin)
+ if err != nil {
+ return err
+ }
+ o.Origin = &origin
+ }
+ case "properties":
+ if v != nil {
+ var operationProperties OperationProperties
+ err = json.Unmarshal(*v, &operationProperties)
+ if err != nil {
+ return err
+ }
+ o.OperationProperties = &operationProperties
+ }
+ }
+ }
+
+ return nil
+}
+
+// OperationDisplay display metadata associated with the operation.
+type OperationDisplay struct {
+ // Provider - Service provider: Microsoft Storage.
+ Provider *string `json:"provider,omitempty"`
+ // Resource - Resource on which the operation is performed, etc.
+ Resource *string `json:"resource,omitempty"`
+ // Operation - Type of operation: get, read, delete, etc.
+ Operation *string `json:"operation,omitempty"`
+ // Description - Description of the operation.
+ Description *string `json:"description,omitempty"`
+}
+
+// OperationListResult result of the request to list Storage operations. It contains a list of operations
+// and a URL link to get the next set of results.
+type OperationListResult struct {
+ autorest.Response `json:"-"`
+ // Value - List of Storage operations supported by the Storage resource provider.
+ Value *[]Operation `json:"value,omitempty"`
+}
+
+// OperationProperties properties of the operation, including metric specifications.
+type OperationProperties struct {
+ // ServiceSpecification - One property of the operation, including metric specifications.
+ ServiceSpecification *ServiceSpecification `json:"serviceSpecification,omitempty"`
+}
+
+// PermissionScope the permission scope for a storage account local user.
+type PermissionScope struct {
+ // Permissions - The permissions for the local user. Possible values include: Read (r), Write (w), Delete (d), List (l), and Create (c).
+ Permissions *string `json:"permissions,omitempty"`
+ // Service - The service used by the local user, e.g. blob, file.
+ Service *string `json:"service,omitempty"`
+ // ResourceName - The name of resource, normally the container name or the file share name, used by the local user.
+ ResourceName *string `json:"resourceName,omitempty"`
+}
+
+// PrivateEndpoint the Private Endpoint resource.
+type PrivateEndpoint struct {
+ // ID - READ-ONLY; The ARM identifier for Private Endpoint
+ ID *string `json:"id,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for PrivateEndpoint.
+func (peVar PrivateEndpoint) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// PrivateEndpointConnection the Private Endpoint Connection resource.
+type PrivateEndpointConnection struct {
+ autorest.Response `json:"-"`
+ // PrivateEndpointConnectionProperties - Resource properties.
+ *PrivateEndpointConnectionProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for PrivateEndpointConnection.
+func (pec PrivateEndpointConnection) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if pec.PrivateEndpointConnectionProperties != nil {
+ objectMap["properties"] = pec.PrivateEndpointConnectionProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for PrivateEndpointConnection struct.
+func (pec *PrivateEndpointConnection) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var privateEndpointConnectionProperties PrivateEndpointConnectionProperties
+ err = json.Unmarshal(*v, &privateEndpointConnectionProperties)
+ if err != nil {
+ return err
+ }
+ pec.PrivateEndpointConnectionProperties = &privateEndpointConnectionProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ pec.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ pec.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ pec.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// PrivateEndpointConnectionListResult list of private endpoint connection associated with the specified
+// storage account
+type PrivateEndpointConnectionListResult struct {
+ autorest.Response `json:"-"`
+ // Value - Array of private endpoint connections
+ Value *[]PrivateEndpointConnection `json:"value,omitempty"`
+}
+
+// PrivateEndpointConnectionProperties properties of the private endpoint connection.
+type PrivateEndpointConnectionProperties struct {
+ // PrivateEndpoint - The resource of private end point.
+ PrivateEndpoint *PrivateEndpoint `json:"privateEndpoint,omitempty"`
+ // PrivateLinkServiceConnectionState - A collection of information about the state of the connection between service consumer and provider.
+ PrivateLinkServiceConnectionState *PrivateLinkServiceConnectionState `json:"privateLinkServiceConnectionState,omitempty"`
+ // ProvisioningState - The provisioning state of the private endpoint connection resource. Possible values include: 'PrivateEndpointConnectionProvisioningStateSucceeded', 'PrivateEndpointConnectionProvisioningStateCreating', 'PrivateEndpointConnectionProvisioningStateDeleting', 'PrivateEndpointConnectionProvisioningStateFailed'
+ ProvisioningState PrivateEndpointConnectionProvisioningState `json:"provisioningState,omitempty"`
+}
+
+// PrivateLinkResource a private link resource
+type PrivateLinkResource struct {
+ // PrivateLinkResourceProperties - Resource properties.
+ *PrivateLinkResourceProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for PrivateLinkResource.
+func (plr PrivateLinkResource) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if plr.PrivateLinkResourceProperties != nil {
+ objectMap["properties"] = plr.PrivateLinkResourceProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for PrivateLinkResource struct.
+func (plr *PrivateLinkResource) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var privateLinkResourceProperties PrivateLinkResourceProperties
+ err = json.Unmarshal(*v, &privateLinkResourceProperties)
+ if err != nil {
+ return err
+ }
+ plr.PrivateLinkResourceProperties = &privateLinkResourceProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ plr.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ plr.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ plr.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// PrivateLinkResourceListResult a list of private link resources
+type PrivateLinkResourceListResult struct {
+ autorest.Response `json:"-"`
+ // Value - Array of private link resources
+ Value *[]PrivateLinkResource `json:"value,omitempty"`
+}
+
+// PrivateLinkResourceProperties properties of a private link resource.
+type PrivateLinkResourceProperties struct {
+ // GroupID - READ-ONLY; The private link resource group id.
+ GroupID *string `json:"groupId,omitempty"`
+ // RequiredMembers - READ-ONLY; The private link resource required member names.
+ RequiredMembers *[]string `json:"requiredMembers,omitempty"`
+ // RequiredZoneNames - The private link resource Private link DNS zone name.
+ RequiredZoneNames *[]string `json:"requiredZoneNames,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for PrivateLinkResourceProperties.
+func (plrp PrivateLinkResourceProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if plrp.RequiredZoneNames != nil {
+ objectMap["requiredZoneNames"] = plrp.RequiredZoneNames
+ }
+ return json.Marshal(objectMap)
+}
+
+// PrivateLinkServiceConnectionState a collection of information about the state of the connection between
+// service consumer and provider.
+type PrivateLinkServiceConnectionState struct {
+ // Status - Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service. Possible values include: 'PrivateEndpointServiceConnectionStatusPending', 'PrivateEndpointServiceConnectionStatusApproved', 'PrivateEndpointServiceConnectionStatusRejected'
+ Status PrivateEndpointServiceConnectionStatus `json:"status,omitempty"`
+ // Description - The reason for approval/rejection of the connection.
+ Description *string `json:"description,omitempty"`
+ // ActionRequired - A message indicating if changes on the service provider require any updates on the consumer.
+ ActionRequired *string `json:"actionRequired,omitempty"`
+}
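+
+// A minimal sketch of approving a pending private endpoint connection by
+// setting the state above on the connection's properties:
+//
+//   state := PrivateLinkServiceConnectionState{
+//       Status:      PrivateEndpointServiceConnectionStatusApproved,
+//       Description: to.StringPtr("approved by storage admin"),
+//   }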
+
+// ProtectedAppendWritesHistory protected append writes history setting for the blob container with Legal
+// holds.
+type ProtectedAppendWritesHistory struct {
+ // AllowProtectedAppendWritesAll - When enabled, new blocks can be written to both 'Append and Block Blobs' while maintaining legal hold protection and compliance. Only new blocks can be added and any existing blocks cannot be modified or deleted.
+ AllowProtectedAppendWritesAll *bool `json:"allowProtectedAppendWritesAll,omitempty"`
+ // Timestamp - READ-ONLY; Returns the date and time the tag was added.
+ Timestamp *date.Time `json:"timestamp,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ProtectedAppendWritesHistory.
+func (pawh ProtectedAppendWritesHistory) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if pawh.AllowProtectedAppendWritesAll != nil {
+ objectMap["allowProtectedAppendWritesAll"] = pawh.AllowProtectedAppendWritesAll
+ }
+ return json.Marshal(objectMap)
+}
+
+// ProtocolSettings protocol settings for file service
+type ProtocolSettings struct {
+ // Smb - Setting for SMB protocol
+ Smb *SmbSetting `json:"smb,omitempty"`
+}
+
+// ProxyResource the resource model definition for an Azure Resource Manager proxy resource. It will not
+// have tags and a location
+type ProxyResource struct {
+ // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ProxyResource.
+func (pr ProxyResource) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
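+
+// NOTE (editorial sketch, not generated code): every field of ProxyResource is
+// READ-ONLY, so the custom marshaler above intentionally emits an empty object;
+// server-populated values are never echoed back on PUT/PATCH. For example:
+//
+//	pr := ProxyResource{ID: to.StringPtr("/subscriptions/...")}
+//	b, _ := json.Marshal(pr) // b == []byte("{}")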
+
+// Queue ...
+type Queue struct {
+ autorest.Response `json:"-"`
+ // QueueProperties - Queue resource properties.
+ *QueueProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for Queue.
+func (q Queue) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if q.QueueProperties != nil {
+ objectMap["properties"] = q.QueueProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for Queue struct.
+func (q *Queue) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var queueProperties QueueProperties
+ err = json.Unmarshal(*v, &queueProperties)
+ if err != nil {
+ return err
+ }
+ q.QueueProperties = &queueProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ q.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ q.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ q.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
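+
+// NOTE (editorial sketch, not generated code): Queue flattens its "properties"
+// envelope through the custom marshaler/unmarshaler above. A minimal round
+// trip, assuming a hypothetical metadata entry:
+//
+//	q := Queue{QueueProperties: &QueueProperties{
+//		Metadata: map[string]*string{"owner": to.StringPtr("team-a")},
+//	}}
+//	b, _ := json.Marshal(q) // {"properties":{"metadata":{"owner":"team-a"}}}
+//	var out Queue
+//	_ = json.Unmarshal(b, &out) // *out.QueueProperties.Metadata["owner"] == "team-a"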
+
+// QueueProperties ...
+type QueueProperties struct {
+ // Metadata - A name-value pair that represents queue metadata.
+ Metadata map[string]*string `json:"metadata"`
+ // ApproximateMessageCount - READ-ONLY; Integer indicating an approximate number of messages in the queue. This number is not lower than the actual number of messages in the queue, but could be higher.
+ ApproximateMessageCount *int32 `json:"approximateMessageCount,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for QueueProperties.
+func (qp QueueProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if qp.Metadata != nil {
+ objectMap["metadata"] = qp.Metadata
+ }
+ return json.Marshal(objectMap)
+}
+
+// QueueServiceProperties the properties of a storage account’s Queue service.
+type QueueServiceProperties struct {
+ autorest.Response `json:"-"`
+ // QueueServicePropertiesProperties - The properties of a storage account’s Queue service.
+ *QueueServicePropertiesProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for QueueServiceProperties.
+func (qsp QueueServiceProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if qsp.QueueServicePropertiesProperties != nil {
+ objectMap["properties"] = qsp.QueueServicePropertiesProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for QueueServiceProperties struct.
+func (qsp *QueueServiceProperties) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var queueServiceProperties QueueServicePropertiesProperties
+ err = json.Unmarshal(*v, &queueServiceProperties)
+ if err != nil {
+ return err
+ }
+ qsp.QueueServicePropertiesProperties = &queueServiceProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ qsp.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ qsp.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ qsp.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// QueueServicePropertiesProperties the properties of a storage account’s Queue service.
+type QueueServicePropertiesProperties struct {
+ // Cors - Specifies CORS rules for the Queue service. You can include up to five CorsRule elements in the request. If no CorsRule elements are included in the request body, all CORS rules will be deleted, and CORS will be disabled for the Queue service.
+ Cors *CorsRules `json:"cors,omitempty"`
+}
+
+// Resource common fields that are returned in the response for all Azure Resource Manager resources
+type Resource struct {
+ // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for Resource.
+func (r Resource) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// ResourceAccessRule resource Access Rule.
+type ResourceAccessRule struct {
+ // TenantID - Tenant Id
+ TenantID *string `json:"tenantId,omitempty"`
+ // ResourceID - Resource Id
+ ResourceID *string `json:"resourceId,omitempty"`
+}
+
+// RestorePolicyProperties the blob service properties for blob restore policy
+type RestorePolicyProperties struct {
+ // Enabled - Blob restore is enabled if set to true.
+ Enabled *bool `json:"enabled,omitempty"`
+ // Days - How long this blob can be restored. It should be greater than zero and less than DeleteRetentionPolicy.days.
+ Days *int32 `json:"days,omitempty"`
+ // LastEnabledTime - READ-ONLY; Deprecated in favor of minRestoreTime property.
+ LastEnabledTime *date.Time `json:"lastEnabledTime,omitempty"`
+ // MinRestoreTime - READ-ONLY; Returns the minimum date and time that the restore can be started.
+ MinRestoreTime *date.Time `json:"minRestoreTime,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for RestorePolicyProperties.
+func (rpp RestorePolicyProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if rpp.Enabled != nil {
+ objectMap["enabled"] = rpp.Enabled
+ }
+ if rpp.Days != nil {
+ objectMap["days"] = rpp.Days
+ }
+ return json.Marshal(objectMap)
+}
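+
+// NOTE (editorial sketch, not generated code): Days must be greater than zero
+// and strictly less than DeleteRetentionPolicy.days. A minimal sketch assuming
+// a 30-day delete-retention window:
+//
+//	restore := RestorePolicyProperties{
+//		Enabled: to.BoolPtr(true),
+//		Days:    to.Int32Ptr(29), // must stay below DeleteRetentionPolicy.days (30)
+//	}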
+
+// Restriction the restriction because of which the SKU cannot be used.
+type Restriction struct {
+ // Type - READ-ONLY; The type of restrictions. As of now the only possible value for this is location.
+ Type *string `json:"type,omitempty"`
+ // Values - READ-ONLY; The value of restrictions. If the restriction type is set to location, this would be the different locations where the SKU is restricted.
+ Values *[]string `json:"values,omitempty"`
+ // ReasonCode - The reason for the restriction. As of now this can be "QuotaId" or "NotAvailableForSubscription". "QuotaId" is set when the SKU has the requiredQuotas parameter and the subscription does not belong to that quota. "NotAvailableForSubscription" is related to capacity at the DC. Possible values include: 'ReasonCodeQuotaID', 'ReasonCodeNotAvailableForSubscription'
+ ReasonCode ReasonCode `json:"reasonCode,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for Restriction.
+func (r Restriction) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if r.ReasonCode != "" {
+ objectMap["reasonCode"] = r.ReasonCode
+ }
+ return json.Marshal(objectMap)
+}
+
+// RoutingPreference routing preference defines the type of network, either microsoft or internet routing,
+// to be used to deliver the user data; the default option is microsoft routing
+type RoutingPreference struct {
+ // RoutingChoice - Routing Choice defines the kind of network routing opted by the user. Possible values include: 'RoutingChoiceMicrosoftRouting', 'RoutingChoiceInternetRouting'
+ RoutingChoice RoutingChoice `json:"routingChoice,omitempty"`
+ // PublishMicrosoftEndpoints - A boolean flag which indicates whether microsoft routing storage endpoints are to be published
+ PublishMicrosoftEndpoints *bool `json:"publishMicrosoftEndpoints,omitempty"`
+ // PublishInternetEndpoints - A boolean flag which indicates whether internet routing storage endpoints are to be published
+ PublishInternetEndpoints *bool `json:"publishInternetEndpoints,omitempty"`
+}
+
+// SasPolicy sasPolicy assigned to the storage account.
+type SasPolicy struct {
+ // SasExpirationPeriod - The SAS expiration period, DD.HH:MM:SS.
+ SasExpirationPeriod *string `json:"sasExpirationPeriod,omitempty"`
+ // ExpirationAction - The SAS expiration action. Can only be Log.
+ ExpirationAction *string `json:"expirationAction,omitempty"`
+}
+
+// ServiceSasParameters the parameters to list service SAS credentials of a specific resource.
+type ServiceSasParameters struct {
+ // CanonicalizedResource - The canonical path to the signed resource.
+ CanonicalizedResource *string `json:"canonicalizedResource,omitempty"`
+ // Resource - The signed services accessible with the service SAS. Possible values include: Blob (b), Container (c), File (f), Share (s). Possible values include: 'SignedResourceB', 'SignedResourceC', 'SignedResourceF', 'SignedResourceS'
+ Resource SignedResource `json:"signedResource,omitempty"`
+ // Permissions - The signed permissions for the service SAS. Possible values include: Read (r), Write (w), Delete (d), List (l), Add (a), Create (c), Update (u) and Process (p). Possible values include: 'PermissionsR', 'PermissionsD', 'PermissionsW', 'PermissionsL', 'PermissionsA', 'PermissionsC', 'PermissionsU', 'PermissionsP'
+ Permissions Permissions `json:"signedPermission,omitempty"`
+ // IPAddressOrRange - An IP address or a range of IP addresses from which to accept requests.
+ IPAddressOrRange *string `json:"signedIp,omitempty"`
+ // Protocols - The protocol permitted for a request made with the account SAS. Possible values include: 'HTTPProtocolHttpshttp', 'HTTPProtocolHTTPS'
+ Protocols HTTPProtocol `json:"signedProtocol,omitempty"`
+ // SharedAccessStartTime - The time at which the SAS becomes valid.
+ SharedAccessStartTime *date.Time `json:"signedStart,omitempty"`
+ // SharedAccessExpiryTime - The time at which the shared access signature becomes invalid.
+ SharedAccessExpiryTime *date.Time `json:"signedExpiry,omitempty"`
+ // Identifier - A unique value up to 64 characters in length that correlates to an access policy specified for the container, queue, or table.
+ Identifier *string `json:"signedIdentifier,omitempty"`
+ // PartitionKeyStart - The start of partition key.
+ PartitionKeyStart *string `json:"startPk,omitempty"`
+ // PartitionKeyEnd - The end of partition key.
+ PartitionKeyEnd *string `json:"endPk,omitempty"`
+ // RowKeyStart - The start of row key.
+ RowKeyStart *string `json:"startRk,omitempty"`
+ // RowKeyEnd - The end of row key.
+ RowKeyEnd *string `json:"endRk,omitempty"`
+ // KeyToSign - The key to sign the account SAS token with.
+ KeyToSign *string `json:"keyToSign,omitempty"`
+ // CacheControl - The response header override for cache control.
+ CacheControl *string `json:"rscc,omitempty"`
+ // ContentDisposition - The response header override for content disposition.
+ ContentDisposition *string `json:"rscd,omitempty"`
+ // ContentEncoding - The response header override for content encoding.
+ ContentEncoding *string `json:"rsce,omitempty"`
+ // ContentLanguage - The response header override for content language.
+ ContentLanguage *string `json:"rscl,omitempty"`
+ // ContentType - The response header override for content type.
+ ContentType *string `json:"rsct,omitempty"`
+}
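+
+// NOTE (editorial sketch, not generated code): a minimal set of service SAS
+// parameters granting HTTPS-only read access to a blob container; the account
+// and container names are placeholders, and the canonicalized resource is
+// assumed to take the form "/blob/{accountName}/{containerName}".
+//
+//	params := ServiceSasParameters{
+//		CanonicalizedResource:  to.StringPtr("/blob/mystorageacct/mycontainer"),
+//		Resource:               SignedResourceC,
+//		Permissions:            PermissionsR,
+//		Protocols:              HTTPProtocolHTTPS,
+//		SharedAccessExpiryTime: &date.Time{Time: time.Now().Add(24 * time.Hour)},
+//	}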
+
+// ServiceSpecification one property of operation, include metric specifications.
+type ServiceSpecification struct {
+ // MetricSpecifications - Metric specifications of operation.
+ MetricSpecifications *[]MetricSpecification `json:"metricSpecifications,omitempty"`
+}
+
+// SignedIdentifier ...
+type SignedIdentifier struct {
+ // ID - A unique identifier of the stored access policy.
+ ID *string `json:"id,omitempty"`
+ // AccessPolicy - Access policy
+ AccessPolicy *AccessPolicy `json:"accessPolicy,omitempty"`
+}
+
+// Sku the SKU of the storage account.
+type Sku struct {
+ // Name - Possible values include: 'SkuNameStandardLRS', 'SkuNameStandardGRS', 'SkuNameStandardRAGRS', 'SkuNameStandardZRS', 'SkuNamePremiumLRS', 'SkuNamePremiumZRS', 'SkuNameStandardGZRS', 'SkuNameStandardRAGZRS'
+ Name SkuName `json:"name,omitempty"`
+ // Tier - Possible values include: 'SkuTierStandard', 'SkuTierPremium'
+ Tier SkuTier `json:"tier,omitempty"`
+}
+
+// SKUCapability the capability information in the specified SKU, including file encryption, network ACLs,
+// change notification, etc.
+type SKUCapability struct {
+ // Name - READ-ONLY; The name of the capability. The capability information in the specified SKU, including file encryption, network ACLs, change notification, etc.
+ Name *string `json:"name,omitempty"`
+ // Value - READ-ONLY; A string value to indicate states of given capability. Possibly 'true' or 'false'.
+ Value *string `json:"value,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for SKUCapability.
+func (sc SKUCapability) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// SkuInformation storage SKU and its properties
+type SkuInformation struct {
+ // Name - Possible values include: 'SkuNameStandardLRS', 'SkuNameStandardGRS', 'SkuNameStandardRAGRS', 'SkuNameStandardZRS', 'SkuNamePremiumLRS', 'SkuNamePremiumZRS', 'SkuNameStandardGZRS', 'SkuNameStandardRAGZRS'
+ Name SkuName `json:"name,omitempty"`
+ // Tier - Possible values include: 'SkuTierStandard', 'SkuTierPremium'
+ Tier SkuTier `json:"tier,omitempty"`
+ // ResourceType - READ-ONLY; The type of the resource, usually it is 'storageAccounts'.
+ ResourceType *string `json:"resourceType,omitempty"`
+ // Kind - READ-ONLY; Indicates the type of storage account. Possible values include: 'KindStorage', 'KindStorageV2', 'KindBlobStorage', 'KindFileStorage', 'KindBlockBlobStorage'
+ Kind Kind `json:"kind,omitempty"`
+ // Locations - READ-ONLY; The set of locations in which the SKU is available. These will be supported and registered Azure Geo Regions (e.g. West US, East US, Southeast Asia, etc.).
+ Locations *[]string `json:"locations,omitempty"`
+ // Capabilities - READ-ONLY; The capability information in the specified SKU, including file encryption, network ACLs, change notification, etc.
+ Capabilities *[]SKUCapability `json:"capabilities,omitempty"`
+ // Restrictions - The restrictions because of which the SKU cannot be used. This is empty if there are no restrictions.
+ Restrictions *[]Restriction `json:"restrictions,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for SkuInformation.
+func (si SkuInformation) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if si.Name != "" {
+ objectMap["name"] = si.Name
+ }
+ if si.Tier != "" {
+ objectMap["tier"] = si.Tier
+ }
+ if si.Restrictions != nil {
+ objectMap["restrictions"] = si.Restrictions
+ }
+ return json.Marshal(objectMap)
+}
+
+// SkuListResult the response from the List Storage SKUs operation.
+type SkuListResult struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; Get the list result of storage SKUs and their properties.
+ Value *[]SkuInformation `json:"value,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for SkuListResult.
+func (slr SkuListResult) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// SmbSetting setting for SMB protocol
+type SmbSetting struct {
+ // Multichannel - Multichannel setting. Applies to Premium FileStorage only.
+ Multichannel *Multichannel `json:"multichannel,omitempty"`
+ // Versions - SMB protocol versions supported by server. Valid values are SMB2.1, SMB3.0, SMB3.1.1. Should be passed as a string with delimiter ';'.
+ Versions *string `json:"versions,omitempty"`
+ // AuthenticationMethods - SMB authentication methods supported by server. Valid values are NTLMv2, Kerberos. Should be passed as a string with delimiter ';'.
+ AuthenticationMethods *string `json:"authenticationMethods,omitempty"`
+ // KerberosTicketEncryption - Kerberos ticket encryption supported by server. Valid values are RC4-HMAC, AES-256. Should be passed as a string with delimiter ';'
+ KerberosTicketEncryption *string `json:"kerberosTicketEncryption,omitempty"`
+ // ChannelEncryption - SMB channel encryption supported by server. Valid values are AES-128-CCM, AES-128-GCM, AES-256-GCM. Should be passed as a string with delimiter ';'.
+ ChannelEncryption *string `json:"channelEncryption,omitempty"`
+}
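+
+// NOTE (editorial sketch, not generated code): the SMB settings above are
+// semicolon-delimited strings rather than slices. For example, to restrict a
+// share to SMB 3.x with Kerberos and AES-256 tickets:
+//
+//	smb := SmbSetting{
+//		Versions:                 to.StringPtr("SMB3.0;SMB3.1.1"),
+//		AuthenticationMethods:    to.StringPtr("Kerberos"),
+//		KerberosTicketEncryption: to.StringPtr("AES-256"),
+//		ChannelEncryption:        to.StringPtr("AES-128-GCM;AES-256-GCM"),
+//	}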
+
+// SSHPublicKey ...
+type SSHPublicKey struct {
+ // Description - Optional. It is used to store the function/usage of the key
+ Description *string `json:"description,omitempty"`
+ // Key - Ssh public key base64 encoded. The format should be: '<keyType> <keyData>', e.g. ssh-rsa AAAABBBB
+ Key *string `json:"key,omitempty"`
+}
+
+// SystemData metadata pertaining to creation and last modification of the resource.
+type SystemData struct {
+ // CreatedBy - The identity that created the resource.
+ CreatedBy *string `json:"createdBy,omitempty"`
+ // CreatedByType - The type of identity that created the resource. Possible values include: 'CreatedByTypeUser', 'CreatedByTypeApplication', 'CreatedByTypeManagedIdentity', 'CreatedByTypeKey'
+ CreatedByType CreatedByType `json:"createdByType,omitempty"`
+ // CreatedAt - The timestamp of resource creation (UTC).
+ CreatedAt *date.Time `json:"createdAt,omitempty"`
+ // LastModifiedBy - The identity that last modified the resource.
+ LastModifiedBy *string `json:"lastModifiedBy,omitempty"`
+ // LastModifiedByType - The type of identity that last modified the resource. Possible values include: 'CreatedByTypeUser', 'CreatedByTypeApplication', 'CreatedByTypeManagedIdentity', 'CreatedByTypeKey'
+ LastModifiedByType CreatedByType `json:"lastModifiedByType,omitempty"`
+ // LastModifiedAt - The timestamp of resource last modification (UTC)
+ LastModifiedAt *date.Time `json:"lastModifiedAt,omitempty"`
+}
+
+// Table properties of the table, including Id, resource name, resource type.
+type Table struct {
+ autorest.Response `json:"-"`
+ // TableProperties - Table resource properties.
+ *TableProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for Table.
+func (t Table) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if t.TableProperties != nil {
+ objectMap["properties"] = t.TableProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for Table struct.
+func (t *Table) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var tableProperties TableProperties
+ err = json.Unmarshal(*v, &tableProperties)
+ if err != nil {
+ return err
+ }
+ t.TableProperties = &tableProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ t.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ t.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ t.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// TableAccessPolicy table Access Policy Properties Object.
+type TableAccessPolicy struct {
+ // StartTime - Start time of the access policy
+ StartTime *date.Time `json:"startTime,omitempty"`
+ // ExpiryTime - Expiry time of the access policy
+ ExpiryTime *date.Time `json:"expiryTime,omitempty"`
+ // Permission - Required. List of abbreviated permissions. Supported permission values include 'r','a','u','d'
+ Permission *string `json:"permission,omitempty"`
+}
+
+// TableProperties ...
+type TableProperties struct {
+ // TableName - READ-ONLY; Table name under the specified account
+ TableName *string `json:"tableName,omitempty"`
+ // SignedIdentifiers - List of stored access policies specified on the table.
+ SignedIdentifiers *[]TableSignedIdentifier `json:"signedIdentifiers,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for TableProperties.
+func (tp TableProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if tp.SignedIdentifiers != nil {
+ objectMap["signedIdentifiers"] = tp.SignedIdentifiers
+ }
+ return json.Marshal(objectMap)
+}
+
+// TableServiceProperties the properties of a storage account’s Table service.
+type TableServiceProperties struct {
+ autorest.Response `json:"-"`
+ // TableServicePropertiesProperties - The properties of a storage account’s Table service.
+ *TableServicePropertiesProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for TableServiceProperties.
+func (tsp TableServiceProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if tsp.TableServicePropertiesProperties != nil {
+ objectMap["properties"] = tsp.TableServicePropertiesProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for TableServiceProperties struct.
+func (tsp *TableServiceProperties) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var tableServiceProperties TableServicePropertiesProperties
+ err = json.Unmarshal(*v, &tableServiceProperties)
+ if err != nil {
+ return err
+ }
+ tsp.TableServicePropertiesProperties = &tableServiceProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ tsp.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ tsp.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ tsp.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// TableServicePropertiesProperties the properties of a storage account’s Table service.
+type TableServicePropertiesProperties struct {
+ // Cors - Specifies CORS rules for the Table service. You can include up to five CorsRule elements in the request. If no CorsRule elements are included in the request body, all CORS rules will be deleted, and CORS will be disabled for the Table service.
+ Cors *CorsRules `json:"cors,omitempty"`
+}
+
+// TableSignedIdentifier object to set Table Access Policy.
+type TableSignedIdentifier struct {
+ // ID - unique-64-character-value of the stored access policy.
+ ID *string `json:"id,omitempty"`
+ // AccessPolicy - Access policy
+ AccessPolicy *TableAccessPolicy `json:"accessPolicy,omitempty"`
+}
+
+// TagFilter blob index tag based filtering for blob objects
+type TagFilter struct {
+ // Name - This is the filter tag name, it can have 1 - 128 characters
+ Name *string `json:"name,omitempty"`
+ // Op - This is the comparison operator which is used for object comparison and filtering. Only == (equality operator) is currently supported
+ Op *string `json:"op,omitempty"`
+ // Value - This is the filter tag value field used for tag based filtering, it can have 0 - 256 characters
+ Value *string `json:"value,omitempty"`
+}
+
+// TagProperty a tag of the LegalHold of a blob container.
+type TagProperty struct {
+ // Tag - READ-ONLY; The tag value.
+ Tag *string `json:"tag,omitempty"`
+ // Timestamp - READ-ONLY; Returns the date and time the tag was added.
+ Timestamp *date.Time `json:"timestamp,omitempty"`
+ // ObjectIdentifier - READ-ONLY; Returns the Object ID of the user who added the tag.
+ ObjectIdentifier *string `json:"objectIdentifier,omitempty"`
+ // TenantID - READ-ONLY; Returns the Tenant ID that issued the token for the user who added the tag.
+ TenantID *string `json:"tenantId,omitempty"`
+ // Upn - READ-ONLY; Returns the User Principal Name of the user who added the tag.
+ Upn *string `json:"upn,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for TagProperty.
+func (tp TagProperty) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// TrackedResource the resource model definition for an Azure Resource Manager tracked top level resource
+// which has 'tags' and a 'location'
+type TrackedResource struct {
+ // Tags - Resource tags.
+ Tags map[string]*string `json:"tags"`
+ // Location - The geo-location where the resource lives
+ Location *string `json:"location,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for TrackedResource.
+func (tr TrackedResource) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if tr.Tags != nil {
+ objectMap["tags"] = tr.Tags
+ }
+ if tr.Location != nil {
+ objectMap["location"] = tr.Location
+ }
+ return json.Marshal(objectMap)
+}
+
+// UpdateHistoryProperty an update history of the ImmutabilityPolicy of a blob container.
+type UpdateHistoryProperty struct {
+ // Update - READ-ONLY; The ImmutabilityPolicy update type of a blob container, possible values include: put, lock and extend. Possible values include: 'ImmutabilityPolicyUpdateTypePut', 'ImmutabilityPolicyUpdateTypeLock', 'ImmutabilityPolicyUpdateTypeExtend'
+ Update ImmutabilityPolicyUpdateType `json:"update,omitempty"`
+ // ImmutabilityPeriodSinceCreationInDays - READ-ONLY; The immutability period for the blobs in the container since the policy creation, in days.
+ ImmutabilityPeriodSinceCreationInDays *int32 `json:"immutabilityPeriodSinceCreationInDays,omitempty"`
+ // Timestamp - READ-ONLY; Returns the date and time the ImmutabilityPolicy was updated.
+ Timestamp *date.Time `json:"timestamp,omitempty"`
+ // ObjectIdentifier - READ-ONLY; Returns the Object ID of the user who updated the ImmutabilityPolicy.
+ ObjectIdentifier *string `json:"objectIdentifier,omitempty"`
+ // TenantID - READ-ONLY; Returns the Tenant ID that issued the token for the user who updated the ImmutabilityPolicy.
+ TenantID *string `json:"tenantId,omitempty"`
+ // Upn - READ-ONLY; Returns the User Principal Name of the user who updated the ImmutabilityPolicy.
+ Upn *string `json:"upn,omitempty"`
+ // AllowProtectedAppendWrites - This property can only be changed for unlocked time-based retention policies. When enabled, new blocks can be written to an append blob while maintaining immutability protection and compliance. Only new blocks can be added and any existing blocks cannot be modified or deleted. This property cannot be changed with ExtendImmutabilityPolicy API.
+ AllowProtectedAppendWrites *bool `json:"allowProtectedAppendWrites,omitempty"`
+ // AllowProtectedAppendWritesAll - This property can only be changed for unlocked time-based retention policies. When enabled, new blocks can be written to both 'Append and Block Blobs' while maintaining immutability protection and compliance. Only new blocks can be added and any existing blocks cannot be modified or deleted. This property cannot be changed with ExtendImmutabilityPolicy API. The 'allowProtectedAppendWrites' and 'allowProtectedAppendWritesAll' properties are mutually exclusive.
+ AllowProtectedAppendWritesAll *bool `json:"allowProtectedAppendWritesAll,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for UpdateHistoryProperty.
+func (uhp UpdateHistoryProperty) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if uhp.AllowProtectedAppendWrites != nil {
+ objectMap["allowProtectedAppendWrites"] = uhp.AllowProtectedAppendWrites
+ }
+ if uhp.AllowProtectedAppendWritesAll != nil {
+ objectMap["allowProtectedAppendWritesAll"] = uhp.AllowProtectedAppendWritesAll
+ }
+ return json.Marshal(objectMap)
+}
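+
+// NOTE (editorial sketch, not generated code): allowProtectedAppendWrites and
+// allowProtectedAppendWritesAll are mutually exclusive, so set at most one;
+// leaving the other nil keeps it out of the marshaled payload:
+//
+//	h := UpdateHistoryProperty{AllowProtectedAppendWritesAll: to.BoolPtr(true)}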
+
+// Usage describes Storage Resource Usage.
+type Usage struct {
+ // Unit - READ-ONLY; Gets the unit of measurement. Possible values include: 'UsageUnitCount', 'UsageUnitBytes', 'UsageUnitSeconds', 'UsageUnitPercent', 'UsageUnitCountsPerSecond', 'UsageUnitBytesPerSecond'
+ Unit UsageUnit `json:"unit,omitempty"`
+ // CurrentValue - READ-ONLY; Gets the current count of the allocated resources in the subscription.
+ CurrentValue *int32 `json:"currentValue,omitempty"`
+ // Limit - READ-ONLY; Gets the maximum count of the resources that can be allocated in the subscription.
+ Limit *int32 `json:"limit,omitempty"`
+ // Name - READ-ONLY; Gets the name of the type of usage.
+ Name *UsageName `json:"name,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for Usage.
+func (u Usage) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// UsageListResult the response from the List Usages operation.
+type UsageListResult struct {
+ autorest.Response `json:"-"`
+ // Value - Gets or sets the list of Storage Resource Usages.
+ Value *[]Usage `json:"value,omitempty"`
+}
+
+// UsageName the usage names that can be used; currently limited to StorageAccount.
+type UsageName struct {
+ // Value - READ-ONLY; Gets a string describing the resource name.
+ Value *string `json:"value,omitempty"`
+ // LocalizedValue - READ-ONLY; Gets a localized string describing the resource name.
+ LocalizedValue *string `json:"localizedValue,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for UsageName.
+func (un UsageName) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// UserAssignedIdentity userAssignedIdentity for the resource.
+type UserAssignedIdentity struct {
+ // PrincipalID - READ-ONLY; The principal ID of the identity.
+ PrincipalID *string `json:"principalId,omitempty"`
+ // ClientID - READ-ONLY; The client ID of the identity.
+ ClientID *string `json:"clientId,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for UserAssignedIdentity.
+func (uai UserAssignedIdentity) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// VirtualNetworkRule virtual Network rule.
+type VirtualNetworkRule struct {
+ // VirtualNetworkResourceID - Resource ID of a subnet, for example: /subscriptions/{subscriptionId}/resourceGroups/{groupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}.
+ VirtualNetworkResourceID *string `json:"id,omitempty"`
+ // Action - The action of virtual network rule. Possible values include: 'ActionAllow'
+ Action Action `json:"action,omitempty"`
+ // State - Gets the state of virtual network rule. Possible values include: 'StateProvisioning', 'StateDeprovisioning', 'StateSucceeded', 'StateFailed', 'StateNetworkSourceDeleted'
+ State State `json:"state,omitempty"`
+}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/objectreplicationpolicies.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/objectreplicationpolicies.go
new file mode 100644
index 000000000000..bf815f6c03fa
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/objectreplicationpolicies.go
@@ -0,0 +1,426 @@
+package storage
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// ObjectReplicationPoliciesClient is the Azure Storage Management API.
+type ObjectReplicationPoliciesClient struct {
+ BaseClient
+}
+
+// NewObjectReplicationPoliciesClient creates an instance of the ObjectReplicationPoliciesClient client.
+func NewObjectReplicationPoliciesClient(subscriptionID string) ObjectReplicationPoliciesClient {
+ return NewObjectReplicationPoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewObjectReplicationPoliciesClientWithBaseURI creates an instance of the ObjectReplicationPoliciesClient client
+// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign
+// clouds, Azure stack).
+func NewObjectReplicationPoliciesClientWithBaseURI(baseURI string, subscriptionID string) ObjectReplicationPoliciesClient {
+ return ObjectReplicationPoliciesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate create or update the object replication policy of the storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// objectReplicationPolicyID - for the destination account, provide the value 'default'. Configure the policy
+// on the destination account first. For the source account, provide the value of the policy ID that is
+// returned when you download the policy that was defined on the destination account. The policy is downloaded
+// as a JSON file.
+// properties - the object replication policy set to a storage account. A unique policy ID will be created if
+// absent.
+func (client ObjectReplicationPoliciesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, objectReplicationPolicyID string, properties ObjectReplicationPolicy) (result ObjectReplicationPolicy, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ObjectReplicationPoliciesClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: objectReplicationPolicyID,
+ Constraints: []validation.Constraint{{Target: "objectReplicationPolicyID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: properties,
+ Constraints: []validation.Constraint{{Target: "properties.ObjectReplicationPolicyProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "properties.ObjectReplicationPolicyProperties.SourceAccount", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "properties.ObjectReplicationPolicyProperties.DestinationAccount", Name: validation.Null, Rule: true, Chain: nil},
+ }}}}}); err != nil {
+ return result, validation.NewError("storage.ObjectReplicationPoliciesClient", "CreateOrUpdate", err.Error())
+ }
+
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, objectReplicationPolicyID, properties)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateOrUpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "CreateOrUpdate", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "CreateOrUpdate", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
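+
+// NOTE (editorial sketch, not generated code): policies are configured on the
+// destination account first under the ID 'default'; the service-assigned
+// policy ID is then reused on the source account. The resource names, ctx,
+// policy, and authorizer below are placeholders:
+//
+//	client := NewObjectReplicationPoliciesClient("<subscription-id>")
+//	client.Authorizer = authorizer
+//	dst, err := client.CreateOrUpdate(ctx, "my-rg", "dstaccount", "default", policy)
+//	if err != nil {
+//		// handle error
+//	}
+//	// then apply the same policy to the source account, using the ID the
+//	// service assigned (assumed here to surface as the resource name):
+//	_, err = client.CreateOrUpdate(ctx, "my-rg", "srcaccount", *dst.Name, policy)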
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client ObjectReplicationPoliciesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, objectReplicationPolicyID string, properties ObjectReplicationPolicy) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "objectReplicationPolicyId": autorest.Encode("path", objectReplicationPolicyID),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/objectReplicationPolicies/{objectReplicationPolicyId}", pathParameters),
+ autorest.WithJSON(properties),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client ObjectReplicationPoliciesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client ObjectReplicationPoliciesClient) CreateOrUpdateResponder(resp *http.Response) (result ObjectReplicationPolicy, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes the object replication policy associated with the specified storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// objectReplicationPolicyID - for the destination account, provide the value 'default'. Configure the policy
+// on the destination account first. For the source account, provide the value of the policy ID that is
+// returned when you download the policy that was defined on the destination account. The policy is downloaded
+// as a JSON file.
+func (client ObjectReplicationPoliciesClient) Delete(ctx context.Context, resourceGroupName string, accountName string, objectReplicationPolicyID string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ObjectReplicationPoliciesClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: objectReplicationPolicyID,
+ Constraints: []validation.Constraint{{Target: "objectReplicationPolicyID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.ObjectReplicationPoliciesClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, objectReplicationPolicyID)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "Delete", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client ObjectReplicationPoliciesClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, objectReplicationPolicyID string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "objectReplicationPolicyId": autorest.Encode("path", objectReplicationPolicyID),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/objectReplicationPolicies/{objectReplicationPolicyId}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client ObjectReplicationPoliciesClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client ObjectReplicationPoliciesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get get the object replication policy of the storage account by policy ID.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// objectReplicationPolicyID - for the destination account, provide the value 'default'. Configure the policy
+// on the destination account first. For the source account, provide the value of the policy ID that is
+// returned when you download the policy that was defined on the destination account. The policy is downloaded
+// as a JSON file.
+func (client ObjectReplicationPoliciesClient) Get(ctx context.Context, resourceGroupName string, accountName string, objectReplicationPolicyID string) (result ObjectReplicationPolicy, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ObjectReplicationPoliciesClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: objectReplicationPolicyID,
+ Constraints: []validation.Constraint{{Target: "objectReplicationPolicyID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.ObjectReplicationPoliciesClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, accountName, objectReplicationPolicyID)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "Get", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client ObjectReplicationPoliciesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, objectReplicationPolicyID string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "objectReplicationPolicyId": autorest.Encode("path", objectReplicationPolicyID),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/objectReplicationPolicies/{objectReplicationPolicyId}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ObjectReplicationPoliciesClient) GetSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ObjectReplicationPoliciesClient) GetResponder(resp *http.Response) (result ObjectReplicationPolicy, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List list the object replication policies associated with the storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client ObjectReplicationPoliciesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result ObjectReplicationPolicies, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ObjectReplicationPoliciesClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.ObjectReplicationPoliciesClient", "List", err.Error())
+ }
+
+ req, err := client.ListPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "List", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
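+
+// NOTE (editorial sketch, not generated code): enumerating the policies on an
+// account, reusing the client and ctx placeholders from the sketch above;
+// ObjectReplicationPolicies is assumed to wrap a Value slice:
+//
+//	policies, err := client.List(ctx, "my-rg", "myaccount")
+//	if err == nil && policies.Value != nil {
+//		for _, p := range *policies.Value {
+//			fmt.Println(*p.Name)
+//		}
+//	}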
+
+// ListPreparer prepares the List request.
+func (client ObjectReplicationPoliciesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/objectReplicationPolicies", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client ObjectReplicationPoliciesClient) ListSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client ObjectReplicationPoliciesClient) ListResponder(resp *http.Response) (result ObjectReplicationPolicies, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/operations.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/operations.go
new file mode 100644
index 000000000000..af5740f324f8
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/operations.go
@@ -0,0 +1,98 @@
+package storage
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// OperationsClient is the Azure Storage Management API.
+type OperationsClient struct {
+ BaseClient
+}
+
+// NewOperationsClient creates an instance of the OperationsClient client.
+func NewOperationsClient(subscriptionID string) OperationsClient {
+ return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client using a custom endpoint. Use this
+// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
+func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient {
+ return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List lists all of the available Storage REST API operations.
+func (client OperationsClient) List(ctx context.Context) (result OperationListResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.ListPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/providers/Microsoft.Storage/operations"),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
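
Note that, unlike the subscription-scoped clients in this package, OperationsClient.ListSender retries only on status codes (autorest.DoRetryForStatusCodes) rather than azure.DoRetryWithRegistration, since the /providers/Microsoft.Storage/operations endpoint needs no resource-provider registration. A hypothetical helper (not part of the patch) that prints the advertised operations might look like this, assuming the client already carries an Authorizer:

package storageexample

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
)

// printStorageOperations prints every operation the Microsoft.Storage
// resource provider advertises for this API version.
func printStorageOperations(ctx context.Context, client storage.OperationsClient) error {
	ops, err := client.List(ctx)
	if err != nil {
		return err
	}
	if ops.Value != nil {
		for _, op := range *ops.Value {
			fmt.Println(*op.Name) // e.g. "Microsoft.Storage/storageAccounts/read"
		}
	}
	return nil
}
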
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/privateendpointconnections.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/privateendpointconnections.go
new file mode 100644
index 000000000000..6bd6921f59dd
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/privateendpointconnections.go
@@ -0,0 +1,411 @@
+package storage
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// PrivateEndpointConnectionsClient is the Azure Storage Management API.
+type PrivateEndpointConnectionsClient struct {
+ BaseClient
+}
+
+// NewPrivateEndpointConnectionsClient creates an instance of the PrivateEndpointConnectionsClient client.
+func NewPrivateEndpointConnectionsClient(subscriptionID string) PrivateEndpointConnectionsClient {
+ return NewPrivateEndpointConnectionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewPrivateEndpointConnectionsClientWithBaseURI creates an instance of the PrivateEndpointConnectionsClient client
+// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign
+// clouds, Azure stack).
+func NewPrivateEndpointConnectionsClientWithBaseURI(baseURI string, subscriptionID string) PrivateEndpointConnectionsClient {
+ return PrivateEndpointConnectionsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Delete deletes the specified private endpoint connection associated with the storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// privateEndpointConnectionName - the name of the private endpoint connection associated with the Azure
+// resource.
+func (client PrivateEndpointConnectionsClient) Delete(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.PrivateEndpointConnectionsClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, privateEndpointConnectionName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Delete", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client PrivateEndpointConnectionsClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "privateEndpointConnectionName": autorest.Encode("path", privateEndpointConnectionName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client PrivateEndpointConnectionsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client PrivateEndpointConnectionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets the specified private endpoint connection associated with the storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// privateEndpointConnectionName - the name of the private endpoint connection associated with the Azure
+// resource.
+func (client PrivateEndpointConnectionsClient) Get(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string) (result PrivateEndpointConnection, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.PrivateEndpointConnectionsClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, accountName, privateEndpointConnectionName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Get", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client PrivateEndpointConnectionsClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "privateEndpointConnectionName": autorest.Encode("path", privateEndpointConnectionName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client PrivateEndpointConnectionsClient) GetSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client PrivateEndpointConnectionsClient) GetResponder(resp *http.Response) (result PrivateEndpointConnection, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List lists all the private endpoint connections associated with the storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client PrivateEndpointConnectionsClient) List(ctx context.Context, resourceGroupName string, accountName string) (result PrivateEndpointConnectionListResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.PrivateEndpointConnectionsClient", "List", err.Error())
+ }
+
+ req, err := client.ListPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "List", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client PrivateEndpointConnectionsClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateEndpointConnections", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client PrivateEndpointConnectionsClient) ListSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client PrivateEndpointConnectionsClient) ListResponder(resp *http.Response) (result PrivateEndpointConnectionListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Put updates the state of the specified private endpoint connection associated with the storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// privateEndpointConnectionName - the name of the private endpoint connection associated with the Azure
+// resource.
+// properties - the private endpoint connection properties.
+func (client PrivateEndpointConnectionsClient) Put(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string, properties PrivateEndpointConnection) (result PrivateEndpointConnection, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.Put")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: properties,
+ Constraints: []validation.Constraint{{Target: "properties.PrivateEndpointConnectionProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "properties.PrivateEndpointConnectionProperties.PrivateLinkServiceConnectionState", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
+ return result, validation.NewError("storage.PrivateEndpointConnectionsClient", "Put", err.Error())
+ }
+
+ req, err := client.PutPreparer(ctx, resourceGroupName, accountName, privateEndpointConnectionName, properties)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Put", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.PutSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Put", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.PutResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Put", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// PutPreparer prepares the Put request.
+func (client PrivateEndpointConnectionsClient) PutPreparer(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string, properties PrivateEndpointConnection) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "privateEndpointConnectionName": autorest.Encode("path", privateEndpointConnectionName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}", pathParameters),
+ autorest.WithJSON(properties),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// PutSender sends the Put request. The method will close the
+// http.Response Body if it receives an error.
+func (client PrivateEndpointConnectionsClient) PutSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// PutResponder handles the response to the Put request. The method always
+// closes the http.Response Body.
+func (client PrivateEndpointConnectionsClient) PutResponder(resp *http.Response) (result PrivateEndpointConnection, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
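
Put's validation block above requires properties.PrivateEndpointConnectionProperties.PrivateLinkServiceConnectionState to be non-nil, so any caller must populate at least that field. A hypothetical sketch of approving a pending connection follows; the helper name is invented, and the status is written as a string cast to the enum type to avoid depending on the generated constant names:

package storageexample

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
	"github.com/Azure/go-autorest/autorest/to"
)

// approveConnection approves a pending private endpoint connection by sending
// exactly the state object that Put's validation demands.
func approveConnection(ctx context.Context, client storage.PrivateEndpointConnectionsClient, rg, account, connName string) error {
	props := storage.PrivateEndpointConnection{
		PrivateEndpointConnectionProperties: &storage.PrivateEndpointConnectionProperties{
			PrivateLinkServiceConnectionState: &storage.PrivateLinkServiceConnectionState{
				Status:      storage.PrivateEndpointServiceConnectionStatus("Approved"),
				Description: to.StringPtr("approved by cluster admin"),
			},
		},
	}
	_, err := client.Put(ctx, rg, account, connName, props)
	return err
}
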
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/privatelinkresources.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/privatelinkresources.go
new file mode 100644
index 000000000000..eefcb3e152a9
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/privatelinkresources.go
@@ -0,0 +1,124 @@
+package storage
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// PrivateLinkResourcesClient is the Azure Storage Management API.
+type PrivateLinkResourcesClient struct {
+ BaseClient
+}
+
+// NewPrivateLinkResourcesClient creates an instance of the PrivateLinkResourcesClient client.
+func NewPrivateLinkResourcesClient(subscriptionID string) PrivateLinkResourcesClient {
+ return NewPrivateLinkResourcesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewPrivateLinkResourcesClientWithBaseURI creates an instance of the PrivateLinkResourcesClient client using a custom
+// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure
+// stack).
+func NewPrivateLinkResourcesClientWithBaseURI(baseURI string, subscriptionID string) PrivateLinkResourcesClient {
+ return PrivateLinkResourcesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// ListByStorageAccount gets the private link resources that need to be created for a storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client PrivateLinkResourcesClient) ListByStorageAccount(ctx context.Context, resourceGroupName string, accountName string) (result PrivateLinkResourceListResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PrivateLinkResourcesClient.ListByStorageAccount")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.PrivateLinkResourcesClient", "ListByStorageAccount", err.Error())
+ }
+
+ req, err := client.ListByStorageAccountPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.PrivateLinkResourcesClient", "ListByStorageAccount", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByStorageAccountSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.PrivateLinkResourcesClient", "ListByStorageAccount", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListByStorageAccountResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.PrivateLinkResourcesClient", "ListByStorageAccount", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// ListByStorageAccountPreparer prepares the ListByStorageAccount request.
+func (client PrivateLinkResourcesClient) ListByStorageAccountPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateLinkResources", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByStorageAccountSender sends the ListByStorageAccount request. The method will close the
+// http.Response Body if it receives an error.
+func (client PrivateLinkResourcesClient) ListByStorageAccountSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListByStorageAccountResponder handles the response to the ListByStorageAccount request. The method always
+// closes the http.Response Body.
+func (client PrivateLinkResourcesClient) ListByStorageAccountResponder(resp *http.Response) (result PrivateLinkResourceListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
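
A small hypothetical helper around ListByStorageAccount, e.g. to discover which sub-resources (such as "blob" or "file") a private endpoint for the account can target; the Value and Name field names follow the usual AutoRest list-result shape and are assumptions here:

package storageexample

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
)

// printPrivateLinkResources lists the private link resources an account
// exposes, one name per line.
func printPrivateLinkResources(ctx context.Context, client storage.PrivateLinkResourcesClient, rg, account string) error {
	res, err := client.ListByStorageAccount(ctx, rg, account)
	if err != nil {
		return err
	}
	if res.Value != nil {
		for _, r := range *res.Value {
			if r.Name != nil {
				fmt.Println(*r.Name)
			}
		}
	}
	return nil
}
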
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/queue.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/queue.go
new file mode 100644
index 000000000000..ce6ae22a0655
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/queue.go
@@ -0,0 +1,571 @@
+package storage
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// QueueClient is the Azure Storage Management API.
+type QueueClient struct {
+ BaseClient
+}
+
+// NewQueueClient creates an instance of the QueueClient client.
+func NewQueueClient(subscriptionID string) QueueClient {
+ return NewQueueClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewQueueClientWithBaseURI creates an instance of the QueueClient client using a custom endpoint. Use this when
+// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
+func NewQueueClientWithBaseURI(baseURI string, subscriptionID string) QueueClient {
+ return QueueClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Create creates a new queue with the specified queue name, under the specified account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// queueName - a queue name must be unique within a storage account and must be between 3 and 63 characters. The
+// name must consist of lowercase alphanumeric and dash(-) characters only; it must begin and end with an
+// alphanumeric character and cannot have two consecutive dash(-) characters.
+// queue - the queue properties and metadata to create the queue with
+func (client QueueClient) Create(ctx context.Context, resourceGroupName string, accountName string, queueName string, queue Queue) (result Queue, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/QueueClient.Create")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: queueName,
+ Constraints: []validation.Constraint{{Target: "queueName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "queueName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.QueueClient", "Create", err.Error())
+ }
+
+ req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, queueName, queue)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.QueueClient", "Create", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.QueueClient", "Create", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.QueueClient", "Create", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// CreatePreparer prepares the Create request.
+func (client QueueClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, queueName string, queue Queue) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "queueName": autorest.Encode("path", queueName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}", pathParameters),
+ autorest.WithJSON(queue),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateSender sends the Create request. The method will close the
+// http.Response Body if it receives an error.
+func (client QueueClient) CreateSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// CreateResponder handles the response to the Create request. The method always
+// closes the http.Response Body.
+func (client QueueClient) CreateResponder(resp *http.Response) (result Queue, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes the queue with the specified queue name, under the specified account if it exists.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// queueName - a queue name must be unique within a storage account and must be between 3 and 63 characters. The
+// name must consist of lowercase alphanumeric and dash(-) characters only; it must begin and end with an
+// alphanumeric character and cannot have two consecutive dash(-) characters.
+func (client QueueClient) Delete(ctx context.Context, resourceGroupName string, accountName string, queueName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/QueueClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: queueName,
+ Constraints: []validation.Constraint{{Target: "queueName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "queueName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.QueueClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, queueName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.QueueClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "storage.QueueClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.QueueClient", "Delete", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client QueueClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, queueName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "queueName": autorest.Encode("path", queueName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client QueueClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client QueueClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets the queue with the specified queue name, under the specified account if it exists.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// queueName - a queue name must be unique within a storage account and must be between 3 and 63 characters. The
+// name must consist of lowercase alphanumeric and dash(-) characters only; it must begin and end with an
+// alphanumeric character and cannot have two consecutive dash(-) characters.
+func (client QueueClient) Get(ctx context.Context, resourceGroupName string, accountName string, queueName string) (result Queue, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/QueueClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: queueName,
+ Constraints: []validation.Constraint{{Target: "queueName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "queueName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.QueueClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, accountName, queueName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.QueueClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.QueueClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.QueueClient", "Get", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client QueueClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, queueName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "queueName": autorest.Encode("path", queueName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client QueueClient) GetSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client QueueClient) GetResponder(resp *http.Response) (result Queue, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List gets a list of all the queues under the specified storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// maxpagesize - optional, the maximum number of queues that should be included in a list queue response.
+// filter - optional, when specified, only the queues with a name starting with the given filter will be
+// listed.
+func (client QueueClient) List(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string) (result ListQueueResourcePage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/QueueClient.List")
+ defer func() {
+ sc := -1
+ if result.lqr.Response.Response != nil {
+ sc = result.lqr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.QueueClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, resourceGroupName, accountName, maxpagesize, filter)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.QueueClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.lqr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.QueueClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.lqr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.QueueClient", "List", resp, "Failure responding to request")
+ return
+ }
+ if result.lqr.hasNextLink() && result.lqr.IsEmpty() {
+ err = result.NextWithContext(ctx)
+ return
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client QueueClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(maxpagesize) > 0 {
+ queryParameters["$maxpagesize"] = autorest.Encode("query", maxpagesize)
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client QueueClient) ListSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client QueueClient) ListResponder(resp *http.Response) (result ListQueueResource, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client QueueClient) listNextResults(ctx context.Context, lastResults ListQueueResource) (result ListQueueResource, err error) {
+ req, err := lastResults.listQueueResourcePreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "storage.QueueClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "storage.QueueClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.QueueClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client QueueClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string) (result ListQueueResourceIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/QueueClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, resourceGroupName, accountName, maxpagesize, filter)
+ return
+}
+
+// Update updates the queue with the specified queue name, under the specified account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// queueName - a queue name must be unique within a storage account and must be between 3 and 63 characters. The
+// name must consist of lowercase alphanumeric and dash(-) characters only; it must begin and end with an
+// alphanumeric character and cannot have two consecutive dash(-) characters.
+// queue - the queue properties and metadata to update the queue with
+func (client QueueClient) Update(ctx context.Context, resourceGroupName string, accountName string, queueName string, queue Queue) (result Queue, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/QueueClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: queueName,
+ Constraints: []validation.Constraint{{Target: "queueName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "queueName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.QueueClient", "Update", err.Error())
+ }
+
+ req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, queueName, queue)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.QueueClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.QueueClient", "Update", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.QueueClient", "Update", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client QueueClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, queueName string, queue Queue) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "queueName": autorest.Encode("path", queueName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}", pathParameters),
+ autorest.WithJSON(queue),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client QueueClient) UpdateSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client QueueClient) UpdateResponder(resp *http.Response) (result Queue, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
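
List above returns a single page (ListQueueResourcePage) and wires result.fn to listNextResults, while ListComplete flattens that paging into an iterator. A hypothetical walker over every queue in an account, assuming the standard generated iterator surface (NotDone, Value, NextWithContext); empty maxpagesize and filter arguments mean no paging hint and no name filter:

package storageexample

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
)

// printQueues walks every queue in the account, letting the iterator fetch
// successive pages via listNextResults under the hood.
func printQueues(ctx context.Context, client storage.QueueClient, rg, account string) error {
	iter, err := client.ListComplete(ctx, rg, account, "", "")
	if err != nil {
		return err
	}
	for iter.NotDone() {
		q := iter.Value() // a storage.ListQueue item
		if q.Name != nil {
			fmt.Println(*q.Name)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
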
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/queueservices.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/queueservices.go
new file mode 100644
index 000000000000..21d70bb06d5d
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/queueservices.go
@@ -0,0 +1,313 @@
+package storage
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// QueueServicesClient is the Azure Storage Management API.
+type QueueServicesClient struct {
+ BaseClient
+}
+
+// NewQueueServicesClient creates an instance of the QueueServicesClient client.
+func NewQueueServicesClient(subscriptionID string) QueueServicesClient {
+ return NewQueueServicesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewQueueServicesClientWithBaseURI creates an instance of the QueueServicesClient client using a custom endpoint.
+// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
+func NewQueueServicesClientWithBaseURI(baseURI string, subscriptionID string) QueueServicesClient {
+ return QueueServicesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// GetServiceProperties gets the properties of a storage account’s Queue service, including properties for Storage
+// Analytics and CORS (Cross-Origin Resource Sharing) rules.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client QueueServicesClient) GetServiceProperties(ctx context.Context, resourceGroupName string, accountName string) (result QueueServiceProperties, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/QueueServicesClient.GetServiceProperties")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.QueueServicesClient", "GetServiceProperties", err.Error())
+ }
+
+ req, err := client.GetServicePropertiesPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "GetServiceProperties", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetServicePropertiesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "GetServiceProperties", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetServicePropertiesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "GetServiceProperties", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// GetServicePropertiesPreparer prepares the GetServiceProperties request.
+func (client QueueServicesClient) GetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "queueServiceName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/{queueServiceName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetServicePropertiesSender sends the GetServiceProperties request. The method will close the
+// http.Response Body if it receives an error.
+func (client QueueServicesClient) GetServicePropertiesSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// GetServicePropertiesResponder handles the response to the GetServiceProperties request. The method always
+// closes the http.Response Body.
+func (client QueueServicesClient) GetServicePropertiesResponder(resp *http.Response) (result QueueServiceProperties, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List lists all queue services for the storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client QueueServicesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result ListQueueServices, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/QueueServicesClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.QueueServicesClient", "List", err.Error())
+ }
+
+ req, err := client.ListPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "List", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client QueueServicesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client QueueServicesClient) ListSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client QueueServicesClient) ListResponder(resp *http.Response) (result ListQueueServices, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// SetServiceProperties sets the properties of a storage account’s Queue service, including properties for Storage
+// Analytics and CORS (Cross-Origin Resource Sharing) rules.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// parameters - the properties of a storage account’s Queue service, only properties for Storage Analytics and
+// CORS (Cross-Origin Resource Sharing) rules can be specified.
+func (client QueueServicesClient) SetServiceProperties(ctx context.Context, resourceGroupName string, accountName string, parameters QueueServiceProperties) (result QueueServiceProperties, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/QueueServicesClient.SetServiceProperties")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.QueueServicesClient", "SetServiceProperties", err.Error())
+ }
+
+ req, err := client.SetServicePropertiesPreparer(ctx, resourceGroupName, accountName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "SetServiceProperties", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.SetServicePropertiesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "SetServiceProperties", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.SetServicePropertiesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "SetServiceProperties", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// SetServicePropertiesPreparer prepares the SetServiceProperties request.
+func (client QueueServicesClient) SetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters QueueServiceProperties) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "queueServiceName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/{queueServiceName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// SetServicePropertiesSender sends the SetServiceProperties request. The method will close the
+// http.Response Body if it receives an error.
+func (client QueueServicesClient) SetServicePropertiesSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// SetServicePropertiesResponder handles the response to the SetServiceProperties request. The method always
+// closes the http.Response Body.
+func (client QueueServicesClient) SetServicePropertiesResponder(resp *http.Response) (result QueueServiceProperties, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
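A hedged sketch of the GetServiceProperties / SetServiceProperties pair above, written as a helper that assumes an already-authorized QueueServicesClient (constructed as in the queue sketch earlier); the embedded QueueServicePropertiesProperties and CorsRules field names follow this API version's generated models.

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
)

// clearQueueCors reads the Queue service properties and writes back an empty
// CORS rule set, removing all rules.
func clearQueueCors(ctx context.Context, client storage.QueueServicesClient, rg, account string) error {
	// GetServiceProperties issues the GET shown above against
	// .../queueServices/default and unmarshals the result.
	props, err := client.GetServiceProperties(ctx, rg, account)
	if err != nil {
		return err
	}

	// Per the doc comments above, only Storage Analytics and CORS settings
	// are writable through this endpoint.
	props.QueueServicePropertiesProperties = &storage.QueueServicePropertiesProperties{
		Cors: &storage.CorsRules{CorsRules: &[]storage.CorsRule{}},
	}
	_, err = client.SetServiceProperties(ctx, rg, account, props)
	return err
}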
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/skus.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/skus.go
new file mode 100644
index 000000000000..a756e884531e
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/skus.go
@@ -0,0 +1,109 @@
+package storage
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// SkusClient is the Azure Storage Management API.
+type SkusClient struct {
+ BaseClient
+}
+
+// NewSkusClient creates an instance of the SkusClient client.
+func NewSkusClient(subscriptionID string) SkusClient {
+ return NewSkusClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewSkusClientWithBaseURI creates an instance of the SkusClient client using a custom endpoint. Use this when
+// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
+func NewSkusClientWithBaseURI(baseURI string, subscriptionID string) SkusClient {
+ return SkusClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List lists the available SKUs supported by Microsoft.Storage for given subscription.
+func (client SkusClient) List(ctx context.Context) (result SkuListResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SkusClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.SkusClient", "List", err.Error())
+ }
+
+ req, err := client.ListPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client SkusClient) ListPreparer(ctx context.Context) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/skus", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client SkusClient) ListSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client SkusClient) ListResponder(resp *http.Response) (result SkuListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
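A short sketch of the single-page List call above; SkuListResult.Value and the SkuInformation fields are as generated for this API version, and the client is assumed to be authorized already.

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
)

// printSkus enumerates the storage SKUs visible to the subscription.
func printSkus(ctx context.Context, client storage.SkusClient) error {
	// List is a single, unpaged GET against /providers/Microsoft.Storage/skus.
	result, err := client.List(ctx)
	if err != nil {
		return err
	}
	if result.Value == nil {
		return nil
	}
	for _, sku := range *result.Value {
		// Name and Tier are enum-like string types in the generated models.
		fmt.Printf("%s (tier %s)\n", sku.Name, sku.Tier)
	}
	return nil
}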
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/table.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/table.go
new file mode 100644
index 000000000000..77d7dff4024e
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/table.go
@@ -0,0 +1,568 @@
+package storage
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// TableClient is the Azure Storage Management API.
+type TableClient struct {
+ BaseClient
+}
+
+// NewTableClient creates an instance of the TableClient client.
+func NewTableClient(subscriptionID string) TableClient {
+ return NewTableClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewTableClientWithBaseURI creates an instance of the TableClient client using a custom endpoint. Use this when
+// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
+func NewTableClientWithBaseURI(baseURI string, subscriptionID string) TableClient {
+ return TableClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Create creates a new table with the specified table name, under the specified account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// tableName - a table name must be unique within a storage account and must be between 3 and 63 characters. The
+// name must consist of only alphanumeric characters and it cannot begin with a numeric character.
+// parameters - the parameters to provide to create a table.
+func (client TableClient) Create(ctx context.Context, resourceGroupName string, accountName string, tableName string, parameters *Table) (result Table, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TableClient.Create")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: tableName,
+ Constraints: []validation.Constraint{{Target: "tableName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "tableName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "tableName", Name: validation.Pattern, Rule: `^[A-Za-z][A-Za-z0-9]{2,62}$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.TableClient", "Create", err.Error())
+ }
+
+ req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, tableName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.TableClient", "Create", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.TableClient", "Create", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.TableClient", "Create", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// CreatePreparer prepares the Create request.
+func (client TableClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, tableName string, parameters *Table) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "tableName": autorest.Encode("path", tableName),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/default/tables/{tableName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if parameters != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithJSON(parameters))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateSender sends the Create request. The method will close the
+// http.Response Body if it receives an error.
+func (client TableClient) CreateSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// CreateResponder handles the response to the Create request. The method always
+// closes the http.Response Body.
+func (client TableClient) CreateResponder(resp *http.Response) (result Table, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes the table with the specified table name, under the specified account if it exists.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// tableName - a table name must be unique within a storage account and must be between 3 and 63 characters. The
+// name must consist of only alphanumeric characters and it cannot begin with a numeric character.
+func (client TableClient) Delete(ctx context.Context, resourceGroupName string, accountName string, tableName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TableClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: tableName,
+ Constraints: []validation.Constraint{{Target: "tableName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "tableName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "tableName", Name: validation.Pattern, Rule: `^[A-Za-z][A-Za-z0-9]{2,62}$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.TableClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, tableName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.TableClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "storage.TableClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.TableClient", "Delete", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client TableClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, tableName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "tableName": autorest.Encode("path", tableName),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/default/tables/{tableName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client TableClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client TableClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets the table with the specified table name, under the specified account if it exists.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// tableName - a table name must be unique within a storage account and must be between 3 and 63 characters. The
+// name must consist of only alphanumeric characters and it cannot begin with a numeric character.
+func (client TableClient) Get(ctx context.Context, resourceGroupName string, accountName string, tableName string) (result Table, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TableClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: tableName,
+ Constraints: []validation.Constraint{{Target: "tableName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "tableName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "tableName", Name: validation.Pattern, Rule: `^[A-Za-z][A-Za-z0-9]{2,62}$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.TableClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, accountName, tableName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.TableClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.TableClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.TableClient", "Get", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client TableClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, tableName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "tableName": autorest.Encode("path", tableName),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/default/tables/{tableName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client TableClient) GetSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client TableClient) GetResponder(resp *http.Response) (result Table, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List gets a list of all the tables under the specified storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client TableClient) List(ctx context.Context, resourceGroupName string, accountName string) (result ListTableResourcePage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TableClient.List")
+ defer func() {
+ sc := -1
+ if result.ltr.Response.Response != nil {
+ sc = result.ltr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.TableClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.TableClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.ltr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.TableClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.ltr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.TableClient", "List", resp, "Failure responding to request")
+ return
+ }
+ if result.ltr.hasNextLink() && result.ltr.IsEmpty() {
+ err = result.NextWithContext(ctx)
+ return
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client TableClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/default/tables", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client TableClient) ListSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client TableClient) ListResponder(resp *http.Response) (result ListTableResource, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client TableClient) listNextResults(ctx context.Context, lastResults ListTableResource) (result ListTableResource, err error) {
+ req, err := lastResults.listTableResourcePreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "storage.TableClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "storage.TableClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.TableClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client TableClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string) (result ListTableResourceIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TableClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, resourceGroupName, accountName)
+ return
+}
+
+// Update updates the table with the specified table name, under the specified account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// tableName - a table name must be unique within a storage account and must be between 3 and 63 characters. The
+// name must consist of only alphanumeric characters and it cannot begin with a numeric character.
+// parameters - the parameters to provide to update a table.
+func (client TableClient) Update(ctx context.Context, resourceGroupName string, accountName string, tableName string, parameters *Table) (result Table, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TableClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: tableName,
+ Constraints: []validation.Constraint{{Target: "tableName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "tableName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "tableName", Name: validation.Pattern, Rule: `^[A-Za-z][A-Za-z0-9]{2,62}$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.TableClient", "Update", err.Error())
+ }
+
+ req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, tableName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.TableClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.TableClient", "Update", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.TableClient", "Update", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client TableClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, tableName string, parameters *Table) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "tableName": autorest.Encode("path", tableName),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/default/tables/{tableName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if parameters != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithJSON(parameters))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client TableClient) UpdateSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client TableClient) UpdateResponder(resp *http.Response) (result Table, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
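A sketch of walking every table in an account with the pager generated above; ListComplete hides the listNextResults plumbing behind an iterator. Names are placeholders and the client is assumed to be authorized already, as in the earlier queue sketch.

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
	"github.com/Azure/go-autorest/autorest/to"
)

// printTables lists all tables, crossing page boundaries automatically.
func printTables(ctx context.Context, client storage.TableClient, rg, account string) error {
	// ListComplete fetches the first page via List; NextWithContext follows
	// the nextLink for each subsequent page.
	it, err := client.ListComplete(ctx, rg, account)
	if err != nil {
		return err
	}
	for it.NotDone() {
		fmt.Println(to.String(it.Value().Name)) // Name is the ARM resource name
		if err := it.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}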
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/tableservices.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/tableservices.go
new file mode 100644
index 000000000000..694ca736b6a8
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/tableservices.go
@@ -0,0 +1,313 @@
+package storage
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// TableServicesClient is the Azure Storage Management API.
+type TableServicesClient struct {
+ BaseClient
+}
+
+// NewTableServicesClient creates an instance of the TableServicesClient client.
+func NewTableServicesClient(subscriptionID string) TableServicesClient {
+ return NewTableServicesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewTableServicesClientWithBaseURI creates an instance of the TableServicesClient client using a custom endpoint.
+// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
+func NewTableServicesClientWithBaseURI(baseURI string, subscriptionID string) TableServicesClient {
+ return TableServicesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// GetServiceProperties gets the properties of a storage account’s Table service, including properties for Storage
+// Analytics and CORS (Cross-Origin Resource Sharing) rules.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client TableServicesClient) GetServiceProperties(ctx context.Context, resourceGroupName string, accountName string) (result TableServiceProperties, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TableServicesClient.GetServiceProperties")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.TableServicesClient", "GetServiceProperties", err.Error())
+ }
+
+ req, err := client.GetServicePropertiesPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "GetServiceProperties", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetServicePropertiesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "GetServiceProperties", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetServicePropertiesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "GetServiceProperties", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// GetServicePropertiesPreparer prepares the GetServiceProperties request.
+func (client TableServicesClient) GetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "tableServiceName": autorest.Encode("path", "default"),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/{tableServiceName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetServicePropertiesSender sends the GetServiceProperties request. The method will close the
+// http.Response Body if it receives an error.
+func (client TableServicesClient) GetServicePropertiesSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// GetServicePropertiesResponder handles the response to the GetServiceProperties request. The method always
+// closes the http.Response Body.
+func (client TableServicesClient) GetServicePropertiesResponder(resp *http.Response) (result TableServiceProperties, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List lists all table services for the storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client TableServicesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result ListTableServices, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TableServicesClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.TableServicesClient", "List", err.Error())
+ }
+
+ req, err := client.ListPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "List", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client TableServicesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client TableServicesClient) ListSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client TableServicesClient) ListResponder(resp *http.Response) (result ListTableServices, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// SetServiceProperties sets the properties of a storage account’s Table service, including properties for Storage
+// Analytics and CORS (Cross-Origin Resource Sharing) rules.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// parameters - the properties of a storage account’s Table service, only properties for Storage Analytics and
+// CORS (Cross-Origin Resource Sharing) rules can be specified.
+func (client TableServicesClient) SetServiceProperties(ctx context.Context, resourceGroupName string, accountName string, parameters TableServiceProperties) (result TableServiceProperties, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TableServicesClient.SetServiceProperties")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.TableServicesClient", "SetServiceProperties", err.Error())
+ }
+
+ req, err := client.SetServicePropertiesPreparer(ctx, resourceGroupName, accountName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "SetServiceProperties", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.SetServicePropertiesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "SetServiceProperties", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.SetServicePropertiesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "SetServiceProperties", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// SetServicePropertiesPreparer prepares the SetServiceProperties request.
+func (client TableServicesClient) SetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters TableServiceProperties) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "tableServiceName": autorest.Encode("path", "default"),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/{tableServiceName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// SetServicePropertiesSender sends the SetServiceProperties request. The method will close the
+// http.Response Body if it receives an error.
+func (client TableServicesClient) SetServicePropertiesSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// SetServicePropertiesResponder handles the response to the SetServiceProperties request. The method always
+// closes the http.Response Body.
+func (client TableServicesClient) SetServicePropertiesResponder(resp *http.Response) (result TableServiceProperties, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/usages.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/usages.go
new file mode 100644
index 000000000000..0952a56a5325
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/usages.go
@@ -0,0 +1,112 @@
+package storage
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// UsagesClient is the Azure Storage Management API.
+type UsagesClient struct {
+ BaseClient
+}
+
+// NewUsagesClient creates an instance of the UsagesClient client.
+func NewUsagesClient(subscriptionID string) UsagesClient {
+ return NewUsagesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewUsagesClientWithBaseURI creates an instance of the UsagesClient client using a custom endpoint. Use this when
+// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
+func NewUsagesClientWithBaseURI(baseURI string, subscriptionID string) UsagesClient {
+ return UsagesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// ListByLocation gets the current usage count and the limit for the resources of the location under the subscription.
+// Parameters:
+// location - the location of the Azure Storage resource.
+func (client UsagesClient) ListByLocation(ctx context.Context, location string) (result UsageListResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/UsagesClient.ListByLocation")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.UsagesClient", "ListByLocation", err.Error())
+ }
+
+ req, err := client.ListByLocationPreparer(ctx, location)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.UsagesClient", "ListByLocation", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByLocationSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.UsagesClient", "ListByLocation", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListByLocationResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.UsagesClient", "ListByLocation", resp, "Failure responding to request")
+ return
+ }
+
+ return
+}
+
+// ListByLocationPreparer prepares the ListByLocation request.
+func (client UsagesClient) ListByLocationPreparer(ctx context.Context, location string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "location": autorest.Encode("path", location),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2021-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/locations/{location}/usages", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByLocationSender sends the ListByLocation request. The method will close the
+// http.Response Body if it receives an error.
+func (client UsagesClient) ListByLocationSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListByLocationResponder handles the response to the ListByLocation request. The method always
+// closes the http.Response Body.
+func (client UsagesClient) ListByLocationResponder(resp *http.Response) (result UsageListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
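For orientation, a minimal sketch of how the generated client above might be driven. The environment-based authorizer and the placeholder subscription ID are assumptions for illustration, not part of this change:

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Assumption: credentials come from the usual AZURE_* environment variables.
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		panic(err)
	}

	client := storage.NewUsagesClient("<subscription-id>") // hypothetical ID
	client.Authorizer = authorizer

	// Exercises the ListByLocation Preparer/Sender/Responder chain defined above.
	result, err := client.ListByLocation(context.Background(), "eastus")
	if err != nil {
		panic(err)
	}
	if result.Value != nil {
		for _, u := range *result.Value {
			fmt.Printf("%s: %d of %d\n", *u.Name.Value, *u.CurrentValue, *u.Limit)
		}
	}
}
```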
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/version.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/version.go
new file mode 100644
index 000000000000..5e71325b63f7
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage/version.go
@@ -0,0 +1,19 @@
+package storage
+
+import "github.com/Azure/azure-sdk-for-go/version"
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+// UserAgent returns the UserAgent string to use when sending http.Requests.
+func UserAgent() string {
+ return "Azure-SDK-For-Go/" + Version() + " storage/2021-09-01"
+}
+
+// Version returns the semantic version (see http://semver.org) of the client.
+func Version() string {
+ return version.Number
+}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/version/version.go
index c4aee486fa06..2dde094414cf 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/version/version.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/version/version.go
@@ -4,4 +4,4 @@ package version
// Licensed under the MIT License. See License.txt in the project root for license information.
// Number contains the semantic version of this SDK.
-const Number = "v65.0.0"
+const Number = "v67.2.0"
diff --git a/cluster-autoscaler/vendor/github.com/spf13/cobra/command.go b/cluster-autoscaler/vendor/github.com/spf13/cobra/command.go
index 9d5e9cf5eb2b..6ff47dd5c35b 100644
--- a/cluster-autoscaler/vendor/github.com/spf13/cobra/command.go
+++ b/cluster-autoscaler/vendor/github.com/spf13/cobra/command.go
@@ -998,6 +998,10 @@ func (c *Command) ExecuteC() (cmd *Command, err error) {
// initialize completion at the last point to allow for user overriding
c.InitDefaultCompletionCmd()
+ // Now that all commands have been created, make sure every group
+ // they reference has actually been defined.
+ c.checkCommandGroups()
+
args := c.args
// Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155
@@ -1092,6 +1096,19 @@ func (c *Command) ValidateRequiredFlags() error {
return nil
}
+// checkCommandGroups checks whether a command has been added to a group that does not exist.
+// If so, we panic because it indicates a coding error that should be corrected.
+func (c *Command) checkCommandGroups() {
+ for _, sub := range c.commands {
+ // if Group is not defined let the developer know right away
+ if sub.GroupID != "" && !c.ContainsGroup(sub.GroupID) {
+ panic(fmt.Sprintf("group id '%s' is not defined for subcommand '%s'", sub.GroupID, sub.CommandPath()))
+ }
+
+ sub.checkCommandGroups()
+ }
+}
+
// InitDefaultHelpFlag adds default help flag to c.
// It is called automatically by executing the c or by calling help and usage.
// If c already has help flag, it will do nothing.
@@ -1218,10 +1235,6 @@ func (c *Command) AddCommand(cmds ...*Command) {
panic("Command can't be a child of itself")
}
cmds[i].parent = c
- // if Group is not defined let the developer know right away
- if x.GroupID != "" && !c.ContainsGroup(x.GroupID) {
- panic(fmt.Sprintf("Group id '%s' is not defined for subcommand '%s'", x.GroupID, cmds[i].CommandPath()))
- }
// update max lengths
usageLen := len(x.Use)
if usageLen > c.commandsMaxUseLen {
diff --git a/cluster-autoscaler/vendor/github.com/spf13/cobra/user_guide.md b/cluster-autoscaler/vendor/github.com/spf13/cobra/user_guide.md
index 977306aa8c37..e55367e853f5 100644
--- a/cluster-autoscaler/vendor/github.com/spf13/cobra/user_guide.md
+++ b/cluster-autoscaler/vendor/github.com/spf13/cobra/user_guide.md
@@ -492,10 +492,11 @@ around it. In fact, you can provide your own if you want.
### Grouping commands in help
-Cobra supports grouping of available commands. Groups must be explicitly defined by `AddGroup` and set by
-the `GroupId` element of a subcommand. The groups will appear in the same order as they are defined.
-If you use the generated `help` or `completion` commands, you can set the group ids by `SetHelpCommandGroupId`
-and `SetCompletionCommandGroupId`, respectively.
+Cobra supports grouping of available commands in the help output. To group commands, each group must be explicitly
+defined using `AddGroup()` on the parent command. Then a subcommand can be added to a group using the `GroupID` element
+of that subcommand. The groups will appear in the help output in the same order as they are defined using different
+calls to `AddGroup()`. If you use the generated `help` or `completion` commands, you can set their group ids using
+`SetHelpCommandGroupId()` and `SetCompletionCommandGroupId()` on the root command, respectively.
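A minimal sketch of that flow (the command and group names are illustrative), which also shows the behavior change made in command.go above:

```go
package main

import "github.com/spf13/cobra"

func main() {
	run := func(cmd *cobra.Command, args []string) {}

	rootCmd := &cobra.Command{Use: "app"}
	rootCmd.AddGroup(&cobra.Group{ID: "management", Title: "Management Commands:"})

	// Fine: the "management" group was defined on the parent.
	rootCmd.AddCommand(&cobra.Command{Use: "create", GroupID: "management", Run: run})

	// With the command.go change above, this no longer panics here at
	// AddCommand time; checkCommandGroups now runs once in ExecuteC,
	// after all commands and groups have been registered.
	rootCmd.AddCommand(&cobra.Command{Use: "delete", GroupID: "missing", Run: run})

	// Panics: group id 'missing' is not defined for subcommand 'app delete'.
	rootCmd.Execute()
}
```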
### Defining your own help
diff --git a/cluster-autoscaler/vendor/github.com/stretchr/objx/Taskfile.yml b/cluster-autoscaler/vendor/github.com/stretchr/objx/Taskfile.yml
index a749ac5492e5..7746f516da20 100644
--- a/cluster-autoscaler/vendor/github.com/stretchr/objx/Taskfile.yml
+++ b/cluster-autoscaler/vendor/github.com/stretchr/objx/Taskfile.yml
@@ -25,6 +25,6 @@ tasks:
- go test -race ./...
test-coverage:
- desc: Runs go tests and calucates test coverage
+ desc: Runs go tests and calculates test coverage
cmds:
- go test -race -coverprofile=c.out ./...
diff --git a/cluster-autoscaler/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/cluster-autoscaler/vendor/golang.org/x/crypto/cryptobyte/asn1.go
index 3a1674a1e57c..401414dde2f4 100644
--- a/cluster-autoscaler/vendor/golang.org/x/crypto/cryptobyte/asn1.go
+++ b/cluster-autoscaler/vendor/golang.org/x/crypto/cryptobyte/asn1.go
@@ -264,36 +264,35 @@ func (s *String) ReadASN1Boolean(out *bool) bool {
return true
}
-var bigIntType = reflect.TypeOf((*big.Int)(nil)).Elem()
-
// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does
-// not point to an integer or to a big.Int, it panics. It reports whether the
-// read was successful.
+// not point to an integer, to a big.Int, or to a []byte, it panics. Only
+// positive and zero values can be decoded into []byte, and they are returned as
+// big-endian binary values that share memory with s. Positive values will have
+// no leading zeroes, and zero will be returned as a single zero byte.
+// ReadASN1Integer reports whether the read was successful.
func (s *String) ReadASN1Integer(out interface{}) bool {
- if reflect.TypeOf(out).Kind() != reflect.Ptr {
- panic("out is not a pointer")
- }
- switch reflect.ValueOf(out).Elem().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ switch out := out.(type) {
+ case *int, *int8, *int16, *int32, *int64:
var i int64
if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) {
return false
}
reflect.ValueOf(out).Elem().SetInt(i)
return true
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ case *uint, *uint8, *uint16, *uint32, *uint64:
var u uint64
if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) {
return false
}
reflect.ValueOf(out).Elem().SetUint(u)
return true
- case reflect.Struct:
- if reflect.TypeOf(out).Elem() == bigIntType {
- return s.readASN1BigInt(out.(*big.Int))
- }
+ case *big.Int:
+ return s.readASN1BigInt(out)
+ case *[]byte:
+ return s.readASN1Bytes(out)
+ default:
+ panic("out does not point to an integer type")
}
- panic("out does not point to an integer type")
}
func checkASN1Integer(bytes []byte) bool {
@@ -333,6 +332,21 @@ func (s *String) readASN1BigInt(out *big.Int) bool {
return true
}
+func (s *String) readASN1Bytes(out *[]byte) bool {
+ var bytes String
+ if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) {
+ return false
+ }
+ if bytes[0]&0x80 == 0x80 {
+ return false
+ }
+ for len(bytes) > 1 && bytes[0] == 0 {
+ bytes = bytes[1:]
+ }
+ *out = bytes
+ return true
+}
+
func (s *String) readASN1Int64(out *int64) bool {
var bytes String
if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) {
@@ -532,7 +546,7 @@ func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool {
return false
}
- paddingBits := uint8(bytes[0])
+ paddingBits := bytes[0]
bytes = bytes[1:]
if paddingBits > 7 ||
len(bytes) == 0 && paddingBits != 0 ||
@@ -554,7 +568,7 @@ func (s *String) ReadASN1BitStringAsBytes(out *[]byte) bool {
return false
}
- paddingBits := uint8(bytes[0])
+ paddingBits := bytes[0]
if paddingBits != 0 {
return false
}
@@ -654,34 +668,27 @@ func (s *String) SkipOptionalASN1(tag asn1.Tag) bool {
return s.ReadASN1(&unused, tag)
}
-// ReadOptionalASN1Integer attempts to read an optional ASN.1 INTEGER
-// explicitly tagged with tag into out and advances. If no element with a
-// matching tag is present, it writes defaultValue into out instead. If out
-// does not point to an integer or to a big.Int, it panics. It reports
-// whether the read was successful.
+// ReadOptionalASN1Integer attempts to read an optional ASN.1 INTEGER explicitly
+// tagged with tag into out and advances. If no element with a matching tag is
+// present, it writes defaultValue into out instead. Otherwise, it behaves like
+// ReadASN1Integer.
func (s *String) ReadOptionalASN1Integer(out interface{}, tag asn1.Tag, defaultValue interface{}) bool {
- if reflect.TypeOf(out).Kind() != reflect.Ptr {
- panic("out is not a pointer")
- }
var present bool
var i String
if !s.ReadOptionalASN1(&i, &present, tag) {
return false
}
if !present {
- switch reflect.ValueOf(out).Elem().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ switch out.(type) {
+ case *int, *int8, *int16, *int32, *int64,
+ *uint, *uint8, *uint16, *uint32, *uint64, *[]byte:
reflect.ValueOf(out).Elem().Set(reflect.ValueOf(defaultValue))
- case reflect.Struct:
- if reflect.TypeOf(out).Elem() != bigIntType {
- panic("invalid integer type")
- }
- if reflect.TypeOf(defaultValue).Kind() != reflect.Ptr ||
- reflect.TypeOf(defaultValue).Elem() != bigIntType {
+ case *big.Int:
+ if defaultValue, ok := defaultValue.(*big.Int); ok {
+ out.(*big.Int).Set(defaultValue)
+ } else {
panic("out points to big.Int, but defaultValue does not")
}
- out.(*big.Int).Set(defaultValue.(*big.Int))
default:
panic("invalid integer type")
}
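A small sketch of the new *[]byte destination described in the doc comment above; the DER bytes are illustrative:

```go
package main

import (
	"fmt"
	"math/big"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	// DER encoding of the INTEGER 65537: tag 0x02, length 3, bytes 01 00 01.
	der := []byte{0x02, 0x03, 0x01, 0x00, 0x01}

	// New in this change: decode directly into a []byte aliasing the input.
	var raw []byte
	s := cryptobyte.String(der)
	if !s.ReadASN1Integer(&raw) {
		panic("read failed")
	}
	fmt.Printf("%x\n", raw) // 010001 — big-endian, no leading zeroes

	// Decoding into a *big.Int works as before.
	var n big.Int
	s = cryptobyte.String(der)
	if !s.ReadASN1Integer(&n) {
		panic("read failed")
	}
	fmt.Println(n.String()) // 65537
}
```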
diff --git a/cluster-autoscaler/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go b/cluster-autoscaler/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go
index 7499e3fb69d2..05de9cc2cdcc 100644
--- a/cluster-autoscaler/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go
+++ b/cluster-autoscaler/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go
@@ -14,6 +14,7 @@ package rc2
import (
"crypto/cipher"
"encoding/binary"
+ "math/bits"
)
// The rc2 block size in bytes
@@ -80,10 +81,6 @@ func expandKey(key []byte, t1 int) [64]uint16 {
return k
}
-func rotl16(x uint16, b uint) uint16 {
- return (x >> (16 - b)) | (x << b)
-}
-
func (c *rc2Cipher) Encrypt(dst, src []byte) {
r0 := binary.LittleEndian.Uint16(src[0:])
@@ -96,22 +93,22 @@ func (c *rc2Cipher) Encrypt(dst, src []byte) {
for j <= 16 {
// mix r0
r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
- r0 = rotl16(r0, 1)
+ r0 = bits.RotateLeft16(r0, 1)
j++
// mix r1
r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
- r1 = rotl16(r1, 2)
+ r1 = bits.RotateLeft16(r1, 2)
j++
// mix r2
r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
- r2 = rotl16(r2, 3)
+ r2 = bits.RotateLeft16(r2, 3)
j++
// mix r3
r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
- r3 = rotl16(r3, 5)
+ r3 = bits.RotateLeft16(r3, 5)
j++
}
@@ -124,22 +121,22 @@ func (c *rc2Cipher) Encrypt(dst, src []byte) {
for j <= 40 {
// mix r0
r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
- r0 = rotl16(r0, 1)
+ r0 = bits.RotateLeft16(r0, 1)
j++
// mix r1
r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
- r1 = rotl16(r1, 2)
+ r1 = bits.RotateLeft16(r1, 2)
j++
// mix r2
r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
- r2 = rotl16(r2, 3)
+ r2 = bits.RotateLeft16(r2, 3)
j++
// mix r3
r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
- r3 = rotl16(r3, 5)
+ r3 = bits.RotateLeft16(r3, 5)
j++
}
@@ -152,22 +149,22 @@ func (c *rc2Cipher) Encrypt(dst, src []byte) {
for j <= 60 {
// mix r0
r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
- r0 = rotl16(r0, 1)
+ r0 = bits.RotateLeft16(r0, 1)
j++
// mix r1
r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
- r1 = rotl16(r1, 2)
+ r1 = bits.RotateLeft16(r1, 2)
j++
// mix r2
r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
- r2 = rotl16(r2, 3)
+ r2 = bits.RotateLeft16(r2, 3)
j++
// mix r3
r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
- r3 = rotl16(r3, 5)
+ r3 = bits.RotateLeft16(r3, 5)
j++
}
@@ -188,22 +185,22 @@ func (c *rc2Cipher) Decrypt(dst, src []byte) {
for j >= 44 {
// unmix r3
- r3 = rotl16(r3, 16-5)
+ r3 = bits.RotateLeft16(r3, 16-5)
r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
j--
// unmix r2
- r2 = rotl16(r2, 16-3)
+ r2 = bits.RotateLeft16(r2, 16-3)
r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
j--
// unmix r1
- r1 = rotl16(r1, 16-2)
+ r1 = bits.RotateLeft16(r1, 16-2)
r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
j--
// unmix r0
- r0 = rotl16(r0, 16-1)
+ r0 = bits.RotateLeft16(r0, 16-1)
r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
j--
}
@@ -215,22 +212,22 @@ func (c *rc2Cipher) Decrypt(dst, src []byte) {
for j >= 20 {
// unmix r3
- r3 = rotl16(r3, 16-5)
+ r3 = bits.RotateLeft16(r3, 16-5)
r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
j--
// unmix r2
- r2 = rotl16(r2, 16-3)
+ r2 = bits.RotateLeft16(r2, 16-3)
r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
j--
// unmix r1
- r1 = rotl16(r1, 16-2)
+ r1 = bits.RotateLeft16(r1, 16-2)
r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
j--
// unmix r0
- r0 = rotl16(r0, 16-1)
+ r0 = bits.RotateLeft16(r0, 16-1)
r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
j--
@@ -243,22 +240,22 @@ func (c *rc2Cipher) Decrypt(dst, src []byte) {
for j >= 0 {
// unmix r3
- r3 = rotl16(r3, 16-5)
+ r3 = bits.RotateLeft16(r3, 16-5)
r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
j--
// unmix r2
- r2 = rotl16(r2, 16-3)
+ r2 = bits.RotateLeft16(r2, 16-3)
r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
j--
// unmix r1
- r1 = rotl16(r1, 16-2)
+ r1 = bits.RotateLeft16(r1, 16-2)
r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
j--
// unmix r0
- r0 = rotl16(r0, 16-1)
+ r0 = bits.RotateLeft16(r0, 16-1)
r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
j--
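The rc2 edits (and the salsa20 edits below) are mechanical: hand-rolled rotate expressions are replaced by the intrinsics-backed math/bits functions without changing behavior. A quick sketch verifying the two forms agree for the rotation amounts used here:

```go
package main

import (
	"fmt"
	"math/bits"
)

// rotl16 is the helper this change deletes.
func rotl16(x uint16, b uint) uint16 {
	return (x >> (16 - b)) | (x << b)
}

func main() {
	for _, x := range []uint16{0x0001, 0x8000, 0xbeef} {
		for _, b := range []uint{1, 2, 3, 5, 16 - 5, 16 - 3, 16 - 2, 16 - 1} {
			want, got := rotl16(x, b), bits.RotateLeft16(x, int(b))
			fmt.Printf("%#04x <<< %2d: %#04x %#04x equal=%v\n", x, b, want, got, want == got)
		}
	}
}
```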
diff --git a/cluster-autoscaler/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go b/cluster-autoscaler/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go
index 4c96147c86b6..3fd05b275169 100644
--- a/cluster-autoscaler/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go
+++ b/cluster-autoscaler/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go
@@ -5,6 +5,8 @@
// Package salsa provides low-level access to functions in the Salsa family.
package salsa // import "golang.org/x/crypto/salsa20/salsa"
+import "math/bits"
+
// Sigma is the Salsa20 constant for 256-bit keys.
var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'}
@@ -31,76 +33,76 @@ func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) {
for i := 0; i < 20; i += 2 {
u := x0 + x12
- x4 ^= u<<7 | u>>(32-7)
+ x4 ^= bits.RotateLeft32(u, 7)
u = x4 + x0
- x8 ^= u<<9 | u>>(32-9)
+ x8 ^= bits.RotateLeft32(u, 9)
u = x8 + x4
- x12 ^= u<<13 | u>>(32-13)
+ x12 ^= bits.RotateLeft32(u, 13)
u = x12 + x8
- x0 ^= u<<18 | u>>(32-18)
+ x0 ^= bits.RotateLeft32(u, 18)
u = x5 + x1
- x9 ^= u<<7 | u>>(32-7)
+ x9 ^= bits.RotateLeft32(u, 7)
u = x9 + x5
- x13 ^= u<<9 | u>>(32-9)
+ x13 ^= bits.RotateLeft32(u, 9)
u = x13 + x9
- x1 ^= u<<13 | u>>(32-13)
+ x1 ^= bits.RotateLeft32(u, 13)
u = x1 + x13
- x5 ^= u<<18 | u>>(32-18)
+ x5 ^= bits.RotateLeft32(u, 18)
u = x10 + x6
- x14 ^= u<<7 | u>>(32-7)
+ x14 ^= bits.RotateLeft32(u, 7)
u = x14 + x10
- x2 ^= u<<9 | u>>(32-9)
+ x2 ^= bits.RotateLeft32(u, 9)
u = x2 + x14
- x6 ^= u<<13 | u>>(32-13)
+ x6 ^= bits.RotateLeft32(u, 13)
u = x6 + x2
- x10 ^= u<<18 | u>>(32-18)
+ x10 ^= bits.RotateLeft32(u, 18)
u = x15 + x11
- x3 ^= u<<7 | u>>(32-7)
+ x3 ^= bits.RotateLeft32(u, 7)
u = x3 + x15
- x7 ^= u<<9 | u>>(32-9)
+ x7 ^= bits.RotateLeft32(u, 9)
u = x7 + x3
- x11 ^= u<<13 | u>>(32-13)
+ x11 ^= bits.RotateLeft32(u, 13)
u = x11 + x7
- x15 ^= u<<18 | u>>(32-18)
+ x15 ^= bits.RotateLeft32(u, 18)
u = x0 + x3
- x1 ^= u<<7 | u>>(32-7)
+ x1 ^= bits.RotateLeft32(u, 7)
u = x1 + x0
- x2 ^= u<<9 | u>>(32-9)
+ x2 ^= bits.RotateLeft32(u, 9)
u = x2 + x1
- x3 ^= u<<13 | u>>(32-13)
+ x3 ^= bits.RotateLeft32(u, 13)
u = x3 + x2
- x0 ^= u<<18 | u>>(32-18)
+ x0 ^= bits.RotateLeft32(u, 18)
u = x5 + x4
- x6 ^= u<<7 | u>>(32-7)
+ x6 ^= bits.RotateLeft32(u, 7)
u = x6 + x5
- x7 ^= u<<9 | u>>(32-9)
+ x7 ^= bits.RotateLeft32(u, 9)
u = x7 + x6
- x4 ^= u<<13 | u>>(32-13)
+ x4 ^= bits.RotateLeft32(u, 13)
u = x4 + x7
- x5 ^= u<<18 | u>>(32-18)
+ x5 ^= bits.RotateLeft32(u, 18)
u = x10 + x9
- x11 ^= u<<7 | u>>(32-7)
+ x11 ^= bits.RotateLeft32(u, 7)
u = x11 + x10
- x8 ^= u<<9 | u>>(32-9)
+ x8 ^= bits.RotateLeft32(u, 9)
u = x8 + x11
- x9 ^= u<<13 | u>>(32-13)
+ x9 ^= bits.RotateLeft32(u, 13)
u = x9 + x8
- x10 ^= u<<18 | u>>(32-18)
+ x10 ^= bits.RotateLeft32(u, 18)
u = x15 + x14
- x12 ^= u<<7 | u>>(32-7)
+ x12 ^= bits.RotateLeft32(u, 7)
u = x12 + x15
- x13 ^= u<<9 | u>>(32-9)
+ x13 ^= bits.RotateLeft32(u, 9)
u = x13 + x12
- x14 ^= u<<13 | u>>(32-13)
+ x14 ^= bits.RotateLeft32(u, 13)
u = x14 + x13
- x15 ^= u<<18 | u>>(32-18)
+ x15 ^= bits.RotateLeft32(u, 18)
}
out[0] = byte(x0)
out[1] = byte(x0 >> 8)
diff --git a/cluster-autoscaler/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go b/cluster-autoscaler/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go
index 9bfc0927ce8c..7ec7bb39bc04 100644
--- a/cluster-autoscaler/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go
+++ b/cluster-autoscaler/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go
@@ -4,6 +4,8 @@
package salsa
+import "math/bits"
+
// Core208 applies the Salsa20/8 core function to the 64-byte array in and puts
// the result into the 64-byte array out. The input and output may be the same array.
func Core208(out *[64]byte, in *[64]byte) {
@@ -29,76 +31,76 @@ func Core208(out *[64]byte, in *[64]byte) {
for i := 0; i < 8; i += 2 {
u := x0 + x12
- x4 ^= u<<7 | u>>(32-7)
+ x4 ^= bits.RotateLeft32(u, 7)
u = x4 + x0
- x8 ^= u<<9 | u>>(32-9)
+ x8 ^= bits.RotateLeft32(u, 9)
u = x8 + x4
- x12 ^= u<<13 | u>>(32-13)
+ x12 ^= bits.RotateLeft32(u, 13)
u = x12 + x8
- x0 ^= u<<18 | u>>(32-18)
+ x0 ^= bits.RotateLeft32(u, 18)
u = x5 + x1
- x9 ^= u<<7 | u>>(32-7)
+ x9 ^= bits.RotateLeft32(u, 7)
u = x9 + x5
- x13 ^= u<<9 | u>>(32-9)
+ x13 ^= bits.RotateLeft32(u, 9)
u = x13 + x9
- x1 ^= u<<13 | u>>(32-13)
+ x1 ^= bits.RotateLeft32(u, 13)
u = x1 + x13
- x5 ^= u<<18 | u>>(32-18)
+ x5 ^= bits.RotateLeft32(u, 18)
u = x10 + x6
- x14 ^= u<<7 | u>>(32-7)
+ x14 ^= bits.RotateLeft32(u, 7)
u = x14 + x10
- x2 ^= u<<9 | u>>(32-9)
+ x2 ^= bits.RotateLeft32(u, 9)
u = x2 + x14
- x6 ^= u<<13 | u>>(32-13)
+ x6 ^= bits.RotateLeft32(u, 13)
u = x6 + x2
- x10 ^= u<<18 | u>>(32-18)
+ x10 ^= bits.RotateLeft32(u, 18)
u = x15 + x11
- x3 ^= u<<7 | u>>(32-7)
+ x3 ^= bits.RotateLeft32(u, 7)
u = x3 + x15
- x7 ^= u<<9 | u>>(32-9)
+ x7 ^= bits.RotateLeft32(u, 9)
u = x7 + x3
- x11 ^= u<<13 | u>>(32-13)
+ x11 ^= bits.RotateLeft32(u, 13)
u = x11 + x7
- x15 ^= u<<18 | u>>(32-18)
+ x15 ^= bits.RotateLeft32(u, 18)
u = x0 + x3
- x1 ^= u<<7 | u>>(32-7)
+ x1 ^= bits.RotateLeft32(u, 7)
u = x1 + x0
- x2 ^= u<<9 | u>>(32-9)
+ x2 ^= bits.RotateLeft32(u, 9)
u = x2 + x1
- x3 ^= u<<13 | u>>(32-13)
+ x3 ^= bits.RotateLeft32(u, 13)
u = x3 + x2
- x0 ^= u<<18 | u>>(32-18)
+ x0 ^= bits.RotateLeft32(u, 18)
u = x5 + x4
- x6 ^= u<<7 | u>>(32-7)
+ x6 ^= bits.RotateLeft32(u, 7)
u = x6 + x5
- x7 ^= u<<9 | u>>(32-9)
+ x7 ^= bits.RotateLeft32(u, 9)
u = x7 + x6
- x4 ^= u<<13 | u>>(32-13)
+ x4 ^= bits.RotateLeft32(u, 13)
u = x4 + x7
- x5 ^= u<<18 | u>>(32-18)
+ x5 ^= bits.RotateLeft32(u, 18)
u = x10 + x9
- x11 ^= u<<7 | u>>(32-7)
+ x11 ^= bits.RotateLeft32(u, 7)
u = x11 + x10
- x8 ^= u<<9 | u>>(32-9)
+ x8 ^= bits.RotateLeft32(u, 9)
u = x8 + x11
- x9 ^= u<<13 | u>>(32-13)
+ x9 ^= bits.RotateLeft32(u, 13)
u = x9 + x8
- x10 ^= u<<18 | u>>(32-18)
+ x10 ^= bits.RotateLeft32(u, 18)
u = x15 + x14
- x12 ^= u<<7 | u>>(32-7)
+ x12 ^= bits.RotateLeft32(u, 7)
u = x12 + x15
- x13 ^= u<<9 | u>>(32-9)
+ x13 ^= bits.RotateLeft32(u, 9)
u = x13 + x12
- x14 ^= u<<13 | u>>(32-13)
+ x14 ^= bits.RotateLeft32(u, 13)
u = x14 + x13
- x15 ^= u<<18 | u>>(32-18)
+ x15 ^= bits.RotateLeft32(u, 18)
}
x0 += j0
x1 += j1
diff --git a/cluster-autoscaler/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go b/cluster-autoscaler/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go
index 68169c6d6819..e5cdb9a25bee 100644
--- a/cluster-autoscaler/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go
+++ b/cluster-autoscaler/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go
@@ -4,6 +4,8 @@
package salsa
+import "math/bits"
+
const rounds = 20
// core applies the Salsa20 core function to 16-byte input in, 32-byte key k,
@@ -31,76 +33,76 @@ func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) {
for i := 0; i < rounds; i += 2 {
u := x0 + x12
- x4 ^= u<<7 | u>>(32-7)
+ x4 ^= bits.RotateLeft32(u, 7)
u = x4 + x0
- x8 ^= u<<9 | u>>(32-9)
+ x8 ^= bits.RotateLeft32(u, 9)
u = x8 + x4
- x12 ^= u<<13 | u>>(32-13)
+ x12 ^= bits.RotateLeft32(u, 13)
u = x12 + x8
- x0 ^= u<<18 | u>>(32-18)
+ x0 ^= bits.RotateLeft32(u, 18)
u = x5 + x1
- x9 ^= u<<7 | u>>(32-7)
+ x9 ^= bits.RotateLeft32(u, 7)
u = x9 + x5
- x13 ^= u<<9 | u>>(32-9)
+ x13 ^= bits.RotateLeft32(u, 9)
u = x13 + x9
- x1 ^= u<<13 | u>>(32-13)
+ x1 ^= bits.RotateLeft32(u, 13)
u = x1 + x13
- x5 ^= u<<18 | u>>(32-18)
+ x5 ^= bits.RotateLeft32(u, 18)
u = x10 + x6
- x14 ^= u<<7 | u>>(32-7)
+ x14 ^= bits.RotateLeft32(u, 7)
u = x14 + x10
- x2 ^= u<<9 | u>>(32-9)
+ x2 ^= bits.RotateLeft32(u, 9)
u = x2 + x14
- x6 ^= u<<13 | u>>(32-13)
+ x6 ^= bits.RotateLeft32(u, 13)
u = x6 + x2
- x10 ^= u<<18 | u>>(32-18)
+ x10 ^= bits.RotateLeft32(u, 18)
u = x15 + x11
- x3 ^= u<<7 | u>>(32-7)
+ x3 ^= bits.RotateLeft32(u, 7)
u = x3 + x15
- x7 ^= u<<9 | u>>(32-9)
+ x7 ^= bits.RotateLeft32(u, 9)
u = x7 + x3
- x11 ^= u<<13 | u>>(32-13)
+ x11 ^= bits.RotateLeft32(u, 13)
u = x11 + x7
- x15 ^= u<<18 | u>>(32-18)
+ x15 ^= bits.RotateLeft32(u, 18)
u = x0 + x3
- x1 ^= u<<7 | u>>(32-7)
+ x1 ^= bits.RotateLeft32(u, 7)
u = x1 + x0
- x2 ^= u<<9 | u>>(32-9)
+ x2 ^= bits.RotateLeft32(u, 9)
u = x2 + x1
- x3 ^= u<<13 | u>>(32-13)
+ x3 ^= bits.RotateLeft32(u, 13)
u = x3 + x2
- x0 ^= u<<18 | u>>(32-18)
+ x0 ^= bits.RotateLeft32(u, 18)
u = x5 + x4
- x6 ^= u<<7 | u>>(32-7)
+ x6 ^= bits.RotateLeft32(u, 7)
u = x6 + x5
- x7 ^= u<<9 | u>>(32-9)
+ x7 ^= bits.RotateLeft32(u, 9)
u = x7 + x6
- x4 ^= u<<13 | u>>(32-13)
+ x4 ^= bits.RotateLeft32(u, 13)
u = x4 + x7
- x5 ^= u<<18 | u>>(32-18)
+ x5 ^= bits.RotateLeft32(u, 18)
u = x10 + x9
- x11 ^= u<<7 | u>>(32-7)
+ x11 ^= bits.RotateLeft32(u, 7)
u = x11 + x10
- x8 ^= u<<9 | u>>(32-9)
+ x8 ^= bits.RotateLeft32(u, 9)
u = x8 + x11
- x9 ^= u<<13 | u>>(32-13)
+ x9 ^= bits.RotateLeft32(u, 13)
u = x9 + x8
- x10 ^= u<<18 | u>>(32-18)
+ x10 ^= bits.RotateLeft32(u, 18)
u = x15 + x14
- x12 ^= u<<7 | u>>(32-7)
+ x12 ^= bits.RotateLeft32(u, 7)
u = x12 + x15
- x13 ^= u<<9 | u>>(32-9)
+ x13 ^= bits.RotateLeft32(u, 9)
u = x13 + x12
- x14 ^= u<<13 | u>>(32-13)
+ x14 ^= bits.RotateLeft32(u, 13)
u = x14 + x13
- x15 ^= u<<18 | u>>(32-18)
+ x15 ^= bits.RotateLeft32(u, 18)
}
x0 += j0
x1 += j1
diff --git a/cluster-autoscaler/vendor/golang.org/x/net/http2/flow.go b/cluster-autoscaler/vendor/golang.org/x/net/http2/flow.go
index b51f0e0cf1f5..750ac52f2a52 100644
--- a/cluster-autoscaler/vendor/golang.org/x/net/http2/flow.go
+++ b/cluster-autoscaler/vendor/golang.org/x/net/http2/flow.go
@@ -6,23 +6,91 @@
package http2
-// flow is the flow control window's size.
-type flow struct {
+// inflowMinRefresh is the minimum number of bytes we'll send for a
+// flow control window update.
+const inflowMinRefresh = 4 << 10
+
+// inflow accounts for an inbound flow control window.
+// It tracks both the latest window sent to the peer (used for enforcement)
+// and the accumulated unsent window.
+type inflow struct {
+ avail int32
+ unsent int32
+}
+
+// init sets the initial window.
+func (f *inflow) init(n int32) {
+ f.avail = n
+}
+
+// add adds n bytes to the window, with a maximum window size of max,
+// indicating that the peer can now send us more data.
+// For example, the user read from a {Request,Response} body and consumed
+// some of the buffered data, so the peer can now send more.
+// It returns the number of bytes to send in a WINDOW_UPDATE frame to the peer.
+// Window updates are accumulated and sent when the unsent capacity
+// is at least inflowMinRefresh or will at least double the peer's available window.
+func (f *inflow) add(n int) (connAdd int32) {
+ if n < 0 {
+ panic("negative update")
+ }
+ unsent := int64(f.unsent) + int64(n)
+ // "A sender MUST NOT allow a flow-control window to exceed 2^31-1 octets."
+ // RFC 7540 Section 6.9.1.
+ const maxWindow = 1<<31 - 1
+ if unsent+int64(f.avail) > maxWindow {
+ panic("flow control update exceeds maximum window size")
+ }
+ f.unsent = int32(unsent)
+ if f.unsent < inflowMinRefresh && f.unsent < f.avail {
+ // If there aren't at least inflowMinRefresh bytes of window to send,
+ // and this update won't at least double the window, buffer the update for later.
+ return 0
+ }
+ f.avail += f.unsent
+ f.unsent = 0
+ return int32(unsent)
+}
+
+// take attempts to take n bytes from the peer's flow control window.
+// It reports whether the window has available capacity.
+func (f *inflow) take(n uint32) bool {
+ if n > uint32(f.avail) {
+ return false
+ }
+ f.avail -= int32(n)
+ return true
+}
+
+// takeInflows attempts to take n bytes from two inflows,
+// typically connection-level and stream-level flows.
+// It reports whether both windows have available capacity.
+func takeInflows(f1, f2 *inflow, n uint32) bool {
+ if n > uint32(f1.avail) || n > uint32(f2.avail) {
+ return false
+ }
+ f1.avail -= int32(n)
+ f2.avail -= int32(n)
+ return true
+}
+
+// outflow is the outbound flow control window's size.
+type outflow struct {
_ incomparable
// n is the number of DATA bytes we're allowed to send.
- // A flow is kept both on a conn and a per-stream.
+ // An outflow is kept both on a conn and a per-stream.
n int32
- // conn points to the shared connection-level flow that is
- // shared by all streams on that conn. It is nil for the flow
+ // conn points to the shared connection-level outflow that is
+ // shared by all streams on that conn. It is nil for the outflow
// that's on the conn directly.
- conn *flow
+ conn *outflow
}
-func (f *flow) setConnFlow(cf *flow) { f.conn = cf }
+func (f *outflow) setConnFlow(cf *outflow) { f.conn = cf }
-func (f *flow) available() int32 {
+func (f *outflow) available() int32 {
n := f.n
if f.conn != nil && f.conn.n < n {
n = f.conn.n
@@ -30,7 +98,7 @@ func (f *flow) available() int32 {
return n
}
-func (f *flow) take(n int32) {
+func (f *outflow) take(n int32) {
if n > f.available() {
panic("internal error: took too much")
}
@@ -42,7 +110,7 @@ func (f *flow) take(n int32) {
// add adds n bytes (positive or negative) to the flow control window.
// It returns false if the sum would exceed 2^31-1.
-func (f *flow) add(n int32) bool {
+func (f *outflow) add(n int32) bool {
sum := f.n + n
if (sum > n) == (f.n > 0) {
f.n = sum
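A sketch of the accumulation behavior of inflow.add, written as if it were a test inside package http2 (inflow and inflowMinRefresh are unexported); the byte counts are illustrative:

```go
package http2

import "testing"

func TestInflowAccumulationSketch(t *testing.T) {
	var f inflow
	f.init(65535)

	// Enforcement: the peer sent 10 bytes, shrinking the advertised window.
	if !f.take(10) {
		t.Fatal("peer exceeded its window")
	}

	// The consumer read those 10 bytes. The refund is under inflowMinRefresh
	// (4 KiB) and will not double the available window, so it is buffered.
	if got := f.add(10); got != 0 {
		t.Fatalf("add(10) = %d, want 0 (buffered)", got)
	}

	// Once buffered refunds reach the threshold, add returns the whole
	// accumulated amount to send in a single WINDOW_UPDATE frame.
	f.take(8192)
	if got := f.add(8192); got != 8192+10 {
		t.Fatalf("add(8192) = %d, want %d", got, 8192+10)
	}
}
```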
diff --git a/cluster-autoscaler/vendor/golang.org/x/net/http2/server.go b/cluster-autoscaler/vendor/golang.org/x/net/http2/server.go
index 4eb7617fa0db..b624dc0a705e 100644
--- a/cluster-autoscaler/vendor/golang.org/x/net/http2/server.go
+++ b/cluster-autoscaler/vendor/golang.org/x/net/http2/server.go
@@ -448,7 +448,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
// configured value for inflow, that will be updated when we send a
// WINDOW_UPDATE shortly after sending SETTINGS.
sc.flow.add(initialWindowSize)
- sc.inflow.add(initialWindowSize)
+ sc.inflow.init(initialWindowSize)
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize())
@@ -563,8 +563,8 @@ type serverConn struct {
wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
bodyReadCh chan bodyReadMsg // from handlers -> serve
serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop
- flow flow // conn-wide (not stream-specific) outbound flow control
- inflow flow // conn-wide inbound flow control
+ flow outflow // conn-wide (not stream-specific) outbound flow control
+ inflow inflow // conn-wide inbound flow control
tlsState *tls.ConnectionState // shared by all handlers, like net/http
remoteAddrStr string
writeSched WriteScheduler
@@ -641,10 +641,10 @@ type stream struct {
cancelCtx func()
// owned by serverConn's serve loop:
- bodyBytes int64 // body bytes seen so far
- declBodyBytes int64 // or -1 if undeclared
- flow flow // limits writing from Handler to client
- inflow flow // what the client is allowed to POST/etc to us
+ bodyBytes int64 // body bytes seen so far
+ declBodyBytes int64 // or -1 if undeclared
+ flow outflow // limits writing from Handler to client
+ inflow inflow // what the client is allowed to POST/etc to us
state streamState
resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
gotTrailerHeader bool // HEADER frame for trailers was seen
@@ -1503,7 +1503,7 @@ func (sc *serverConn) processFrame(f Frame) error {
if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || f.Header().StreamID > sc.maxClientStreamID) {
if f, ok := f.(*DataFrame); ok {
- if sc.inflow.available() < int32(f.Length) {
+ if !sc.inflow.take(f.Length) {
return sc.countError("data_flow", streamError(f.Header().StreamID, ErrCodeFlowControl))
}
sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
@@ -1775,14 +1775,9 @@ func (sc *serverConn) processData(f *DataFrame) error {
// But still enforce their connection-level flow control,
// and return any flow control bytes since we're not going
// to consume them.
- if sc.inflow.available() < int32(f.Length) {
+ if !sc.inflow.take(f.Length) {
return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
}
- // Deduct the flow control from inflow, since we're
- // going to immediately add it back in
- // sendWindowUpdate, which also schedules sending the
- // frames.
- sc.inflow.take(int32(f.Length))
sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
if st != nil && st.resetQueued {
@@ -1797,10 +1792,9 @@ func (sc *serverConn) processData(f *DataFrame) error {
// Sender sending more than they'd declared?
if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
- if sc.inflow.available() < int32(f.Length) {
+ if !sc.inflow.take(f.Length) {
return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
}
- sc.inflow.take(int32(f.Length))
sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
@@ -1811,10 +1805,9 @@ func (sc *serverConn) processData(f *DataFrame) error {
}
if f.Length > 0 {
// Check whether the client has flow control quota.
- if st.inflow.available() < int32(f.Length) {
+ if !takeInflows(&sc.inflow, &st.inflow, f.Length) {
return sc.countError("flow_on_data_length", streamError(id, ErrCodeFlowControl))
}
- st.inflow.take(int32(f.Length))
if len(data) > 0 {
wrote, err := st.body.Write(data)
@@ -1830,10 +1823,12 @@ func (sc *serverConn) processData(f *DataFrame) error {
// Return any padded flow control now, since we won't
// refund it later on body reads.
- if pad := int32(f.Length) - int32(len(data)); pad > 0 {
- sc.sendWindowUpdate32(nil, pad)
- sc.sendWindowUpdate32(st, pad)
- }
+ // Call sendWindowUpdate even if there is no padding,
+ // to return buffered flow control credit if the sent
+ // window has shrunk.
+ pad := int32(f.Length) - int32(len(data))
+ sc.sendWindowUpdate32(nil, pad)
+ sc.sendWindowUpdate32(st, pad)
}
if f.StreamEnded() {
st.endStream()
@@ -2105,8 +2100,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
st.cw.Init()
st.flow.conn = &sc.flow // link to conn-level counter
st.flow.add(sc.initialStreamSendWindowSize)
- st.inflow.conn = &sc.inflow // link to conn-level counter
- st.inflow.add(sc.srv.initialStreamRecvWindowSize())
+ st.inflow.init(sc.srv.initialStreamRecvWindowSize())
if sc.hs.WriteTimeout != 0 {
st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
}
@@ -2388,47 +2382,28 @@ func (sc *serverConn) noteBodyRead(st *stream, n int) {
}
// st may be nil for conn-level
-func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
- sc.serveG.check()
- // "The legal range for the increment to the flow control
- // window is 1 to 2^31-1 (2,147,483,647) octets."
- // A Go Read call on 64-bit machines could in theory read
- // a larger Read than this. Very unlikely, but we handle it here
- // rather than elsewhere for now.
- const maxUint31 = 1<<31 - 1
- for n > maxUint31 {
- sc.sendWindowUpdate32(st, maxUint31)
- n -= maxUint31
- }
- sc.sendWindowUpdate32(st, int32(n))
+func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
+ sc.sendWindowUpdate(st, int(n))
}
// st may be nil for conn-level
-func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
+func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
sc.serveG.check()
- if n == 0 {
- return
- }
- if n < 0 {
- panic("negative update")
- }
var streamID uint32
- if st != nil {
+ var send int32
+ if st == nil {
+ send = sc.inflow.add(n)
+ } else {
streamID = st.id
+ send = st.inflow.add(n)
+ }
+ if send == 0 {
+ return
}
sc.writeFrame(FrameWriteRequest{
- write: writeWindowUpdate{streamID: streamID, n: uint32(n)},
+ write: writeWindowUpdate{streamID: streamID, n: uint32(send)},
stream: st,
})
- var ok bool
- if st == nil {
- ok = sc.inflow.add(n)
- } else {
- ok = st.inflow.add(n)
- }
- if !ok {
- panic("internal error; sent too many window updates without decrements?")
- }
}
// requestBody is the Handler's Request.Body type.
diff --git a/cluster-autoscaler/vendor/golang.org/x/net/http2/transport.go b/cluster-autoscaler/vendor/golang.org/x/net/http2/transport.go
index 30f706e6cb81..b43ec10cfed9 100644
--- a/cluster-autoscaler/vendor/golang.org/x/net/http2/transport.go
+++ b/cluster-autoscaler/vendor/golang.org/x/net/http2/transport.go
@@ -47,10 +47,6 @@ const (
// we buffer per stream.
transportDefaultStreamFlow = 4 << 20
- // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send
- // a stream-level WINDOW_UPDATE for at a time.
- transportDefaultStreamMinRefresh = 4 << 10
-
defaultUserAgent = "Go-http-client/2.0"
// initialMaxConcurrentStreams is a connections maxConcurrentStreams until
@@ -310,8 +306,8 @@ type ClientConn struct {
mu sync.Mutex // guards following
cond *sync.Cond // hold mu; broadcast on flow/closed changes
- flow flow // our conn-level flow control quota (cs.flow is per stream)
- inflow flow // peer's conn-level flow control
+ flow outflow // our conn-level flow control quota (cs.outflow is per stream)
+ inflow inflow // peer's conn-level flow control
doNotReuse bool // whether conn is marked to not be reused for any future requests
closing bool
closed bool
@@ -376,10 +372,10 @@ type clientStream struct {
respHeaderRecv chan struct{} // closed when headers are received
res *http.Response // set if respHeaderRecv is closed
- flow flow // guarded by cc.mu
- inflow flow // guarded by cc.mu
- bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
- readErr error // sticky read error; owned by transportResponseBody.Read
+ flow outflow // guarded by cc.mu
+ inflow inflow // guarded by cc.mu
+ bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
+ readErr error // sticky read error; owned by transportResponseBody.Read
reqBody io.ReadCloser
reqBodyContentLength int64 // -1 means unknown
@@ -811,7 +807,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
cc.bw.Write(clientPreface)
cc.fr.WriteSettings(initialSettings...)
cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
- cc.inflow.add(transportDefaultConnFlow + initialWindowSize)
+ cc.inflow.init(transportDefaultConnFlow + initialWindowSize)
cc.bw.Flush()
if cc.werr != nil {
cc.Close()
@@ -2073,8 +2069,7 @@ type resAndError struct {
func (cc *ClientConn) addStreamLocked(cs *clientStream) {
cs.flow.add(int32(cc.initialWindowSize))
cs.flow.setConnFlow(&cc.flow)
- cs.inflow.add(transportDefaultStreamFlow)
- cs.inflow.setConnFlow(&cc.inflow)
+ cs.inflow.init(transportDefaultStreamFlow)
cs.ID = cc.nextStreamID
cc.nextStreamID += 2
cc.streams[cs.ID] = cs
@@ -2533,21 +2528,10 @@ func (b transportResponseBody) Read(p []byte) (n int, err error) {
}
cc.mu.Lock()
- var connAdd, streamAdd int32
- // Check the conn-level first, before the stream-level.
- if v := cc.inflow.available(); v < transportDefaultConnFlow/2 {
- connAdd = transportDefaultConnFlow - v
- cc.inflow.add(connAdd)
- }
+ connAdd := cc.inflow.add(n)
+ var streamAdd int32
if err == nil { // No need to refresh if the stream is over or failed.
- // Consider any buffered body data (read from the conn but not
- // consumed by the client) when computing flow control for this
- // stream.
- v := int(cs.inflow.available()) + cs.bufPipe.Len()
- if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh {
- streamAdd = int32(transportDefaultStreamFlow - v)
- cs.inflow.add(streamAdd)
- }
+ streamAdd = cs.inflow.add(n)
}
cc.mu.Unlock()
@@ -2575,17 +2559,15 @@ func (b transportResponseBody) Close() error {
if unread > 0 {
cc.mu.Lock()
// Return connection-level flow control.
- if unread > 0 {
- cc.inflow.add(int32(unread))
- }
+ connAdd := cc.inflow.add(unread)
cc.mu.Unlock()
// TODO(dneil): Acquiring this mutex can block indefinitely.
// Move flow control return to a goroutine?
cc.wmu.Lock()
// Return connection-level flow control.
- if unread > 0 {
- cc.fr.WriteWindowUpdate(0, uint32(unread))
+ if connAdd > 0 {
+ cc.fr.WriteWindowUpdate(0, uint32(connAdd))
}
cc.bw.Flush()
cc.wmu.Unlock()
@@ -2628,13 +2610,18 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
// But at least return their flow control:
if f.Length > 0 {
cc.mu.Lock()
- cc.inflow.add(int32(f.Length))
+ ok := cc.inflow.take(f.Length)
+ connAdd := cc.inflow.add(int(f.Length))
cc.mu.Unlock()
-
- cc.wmu.Lock()
- cc.fr.WriteWindowUpdate(0, uint32(f.Length))
- cc.bw.Flush()
- cc.wmu.Unlock()
+ if !ok {
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ if connAdd > 0 {
+ cc.wmu.Lock()
+ cc.fr.WriteWindowUpdate(0, uint32(connAdd))
+ cc.bw.Flush()
+ cc.wmu.Unlock()
+ }
}
return nil
}
@@ -2665,9 +2652,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
}
// Check connection-level flow control.
cc.mu.Lock()
- if cs.inflow.available() >= int32(f.Length) {
- cs.inflow.take(int32(f.Length))
- } else {
+ if !takeInflows(&cc.inflow, &cs.inflow, f.Length) {
cc.mu.Unlock()
return ConnectionError(ErrCodeFlowControl)
}
@@ -2689,19 +2674,20 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
}
}
- if refund > 0 {
- cc.inflow.add(int32(refund))
- if !didReset {
- cs.inflow.add(int32(refund))
- }
+ sendConn := cc.inflow.add(refund)
+ var sendStream int32
+ if !didReset {
+ sendStream = cs.inflow.add(refund)
}
cc.mu.Unlock()
- if refund > 0 {
+ if sendConn > 0 || sendStream > 0 {
cc.wmu.Lock()
- cc.fr.WriteWindowUpdate(0, uint32(refund))
- if !didReset {
- cc.fr.WriteWindowUpdate(cs.ID, uint32(refund))
+ if sendConn > 0 {
+ cc.fr.WriteWindowUpdate(0, uint32(sendConn))
+ }
+ if sendStream > 0 {
+ cc.fr.WriteWindowUpdate(cs.ID, uint32(sendStream))
}
cc.bw.Flush()
cc.wmu.Unlock()
diff --git a/cluster-autoscaler/vendor/golang.org/x/net/websocket/websocket.go b/cluster-autoscaler/vendor/golang.org/x/net/websocket/websocket.go
index ea422e110d73..90a2257cd54e 100644
--- a/cluster-autoscaler/vendor/golang.org/x/net/websocket/websocket.go
+++ b/cluster-autoscaler/vendor/golang.org/x/net/websocket/websocket.go
@@ -5,11 +5,10 @@
// Package websocket implements a client and server for the WebSocket protocol
// as specified in RFC 6455.
//
-// This package currently lacks some features found in alternative
-// and more actively maintained WebSocket packages:
+// This package currently lacks some features found in an alternative
+// and more actively maintained WebSocket package:
//
-// https://godoc.org/github.com/gorilla/websocket
-// https://godoc.org/nhooyr.io/websocket
+// https://pkg.go.dev/nhooyr.io/websocket
package websocket // import "golang.org/x/net/websocket"
import (
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go
index 79a38a0b9bcc..a968b80fa6ab 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go
@@ -4,6 +4,11 @@
package cpu
+import (
+ "strings"
+ "syscall"
+)
+
// HWCAP/HWCAP2 bits. These are exposed by Linux.
const (
hwcap_FP = 1 << 0
@@ -32,10 +37,45 @@ const (
hwcap_ASIMDFHM = 1 << 23
)
+// linuxKernelCanEmulateCPUID reports whether we're running
+// on Linux 4.11+. Ideally we'd like to ask the question about
+// whether the current kernel contains
+// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=77c97b4ee21290f5f083173d957843b615abbff2
+// but the version number will have to do.
+func linuxKernelCanEmulateCPUID() bool {
+ var un syscall.Utsname
+ syscall.Uname(&un)
+ var sb strings.Builder
+ for _, b := range un.Release[:] {
+ if b == 0 {
+ break
+ }
+ sb.WriteByte(byte(b))
+ }
+ major, minor, _, ok := parseRelease(sb.String())
+ return ok && (major > 4 || major == 4 && minor >= 11)
+}
+
func doinit() {
if err := readHWCAP(); err != nil {
- // failed to read /proc/self/auxv, try reading registers directly
- readARM64Registers()
+ // We failed to read /proc/self/auxv. This can happen if the binary has
+ // been given extra capabilities(7) with /bin/setcap.
+ //
+ // When this happens, we have two options. If the Linux kernel is new
+ // enough (4.11+), we can read the arm64 registers directly which'll
+ // trap into the kernel and then return back to userspace.
+ //
+ // But on older kernels, such as Linux 4.4.180 as used on many Synology
+ // devices, calling readARM64Registers (specifically getisar0) will
+ // cause a SIGILL and we'll die. So for older kernels, parse /proc/cpuinfo
+ // instead.
+ //
+ // See golang/go#57336.
+ if linuxKernelCanEmulateCPUID() {
+ readARM64Registers()
+ } else {
+ readLinuxProcCPUInfo()
+ }
return
}
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/cpu/parse.go b/cluster-autoscaler/vendor/golang.org/x/sys/cpu/parse.go
new file mode 100644
index 000000000000..762b63d6882c
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/cpu/parse.go
@@ -0,0 +1,43 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+import "strconv"
+
+// parseRelease parses a dot-separated version number. It follows the semver
+// syntax, but allows the minor and patch versions to be elided.
+//
+// This is a copy of the Go runtime's parseRelease from
+// https://golang.org/cl/209597.
+func parseRelease(rel string) (major, minor, patch int, ok bool) {
+ // Strip anything after a dash or plus.
+ for i := 0; i < len(rel); i++ {
+ if rel[i] == '-' || rel[i] == '+' {
+ rel = rel[:i]
+ break
+ }
+ }
+
+ next := func() (int, bool) {
+ for i := 0; i < len(rel); i++ {
+ if rel[i] == '.' {
+ ver, err := strconv.Atoi(rel[:i])
+ rel = rel[i+1:]
+ return ver, err == nil
+ }
+ }
+ ver, err := strconv.Atoi(rel)
+ rel = ""
+ return ver, err == nil
+ }
+ if major, ok = next(); !ok || rel == "" {
+ return
+ }
+ if minor, ok = next(); !ok || rel == "" {
+ return
+ }
+ patch, ok = next()
+ return
+}
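Illustrative inputs and outputs for the parser above, written as if inside package cpu (parseRelease is unexported); the release strings are examples, not taken from this change:

```go
package cpu

import "fmt"

func exampleParseRelease() {
	for _, rel := range []string{"4.11.0", "5.15.0-1023-azure", "4.4.180+", "4"} {
		major, minor, patch, ok := parseRelease(rel)
		fmt.Println(rel, "->", major, minor, patch, ok)
	}
	// 4.11.0            -> 4 11 0 true
	// 5.15.0-1023-azure -> 5 15 0 true   (suffix after '-' is stripped)
	// 4.4.180+          -> 4 4 180 true  (suffix after '+' is stripped)
	// 4                 -> 4 0 0 true    (minor and patch may be elided)
	// linuxKernelCanEmulateCPUID then compares major/minor against 4.11.
}
```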
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go b/cluster-autoscaler/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go
new file mode 100644
index 000000000000..d87bd6b3eb05
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go
@@ -0,0 +1,54 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && arm64
+// +build linux,arm64
+
+package cpu
+
+import (
+ "errors"
+ "io"
+ "os"
+ "strings"
+)
+
+func readLinuxProcCPUInfo() error {
+ f, err := os.Open("/proc/cpuinfo")
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ var buf [1 << 10]byte // enough for first CPU
+ n, err := io.ReadFull(f, buf[:])
+ if err != nil && err != io.ErrUnexpectedEOF {
+ return err
+ }
+ in := string(buf[:n])
+ const features = "\nFeatures\t: "
+ i := strings.Index(in, features)
+ if i == -1 {
+ return errors.New("no CPU features found")
+ }
+ in = in[i+len(features):]
+ if i := strings.Index(in, "\n"); i != -1 {
+ in = in[:i]
+ }
+ m := map[string]*bool{}
+
+ initOptions() // need it early here; it's harmless to call twice
+ for _, o := range options {
+ m[o.Name] = o.Feature
+ }
+ // The EVTSTRM field has alias "evstrm" in Go, but Linux calls it "evtstrm".
+ m["evtstrm"] = &ARM64.HasEVTSTRM
+
+ for _, f := range strings.Fields(in) {
+ if p, ok := m[f]; ok {
+ *p = true
+ }
+ }
+ return nil
+}
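For reference, a sketch of the input this parser expects — the first processor block of /proc/cpuinfo on a typical arm64 machine (field values are illustrative):

```go
// Illustrative /proc/cpuinfo excerpt (values vary by machine). Note the
// tab before the colon: the parser matches the literal "\nFeatures\t: "
// and reads only the first Features line.
const sampleCPUInfo = "processor\t: 0\n" +
	"BogoMIPS\t: 50.00\n" +
	"Features\t: fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics\n" +
	"CPU implementer\t: 0x41\n"
```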
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/gccgo.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/gccgo.go
index 0dee23222ca8..b06f52d748f6 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/gccgo.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/gccgo.go
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build gccgo && !aix
-// +build gccgo,!aix
+//go:build gccgo && !aix && !hurd
+// +build gccgo,!aix,!hurd
package unix
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/gccgo_c.c b/cluster-autoscaler/vendor/golang.org/x/sys/unix/gccgo_c.c
index 2cb1fefac640..c4fce0e70036 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/gccgo_c.c
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/gccgo_c.c
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build gccgo
-// +build !aix
+// +build gccgo,!hurd
+// +build !aix,!hurd
#include <errno.h>
#include <stdint.h>
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ioctl.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ioctl.go
index 6c7ad052e6b3..1c51b0ec2bcd 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ioctl.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ioctl.go
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+//go:build aix || darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd || solaris
+// +build aix darwin dragonfly freebsd hurd linux netbsd openbsd solaris
package unix
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/mkall.sh b/cluster-autoscaler/vendor/golang.org/x/sys/unix/mkall.sh
index 727cba212704..8e3947c3686c 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/mkall.sh
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/mkall.sh
@@ -174,10 +174,10 @@ openbsd_arm64)
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
;;
openbsd_mips64)
+ mkasm="go run mkasm.go"
mkerrors="$mkerrors -m64"
- mksyscall="go run mksyscall.go -openbsd"
+ mksyscall="go run mksyscall.go -openbsd -libc"
mksysctl="go run mksysctl_openbsd.go"
- mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
# Let the type of C char be signed for making the bare syscall
# API consistent across platforms.
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
index 61c0d0de15d5..a41111a794e2 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
@@ -255,6 +255,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
//sys Chmod(path string, mode uint32) (err error)
//sys Chown(path string, uid int, gid int) (err error)
//sys Chroot(path string) (err error)
+//sys ClockGettime(clockid int32, time *Timespec) (err error)
//sys Close(fd int) (err error)
//sys Dup(fd int) (nfd int, err error)
//sys Dup2(from int, to int) (err error)
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_freebsd.go
index de7c23e0648a..d50b9dc250b7 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_freebsd.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_freebsd.go
@@ -319,6 +319,7 @@ func PtraceSingleStep(pid int) (err error) {
//sys Chmod(path string, mode uint32) (err error)
//sys Chown(path string, uid int, gid int) (err error)
//sys Chroot(path string) (err error)
+//sys ClockGettime(clockid int32, time *Timespec) (err error)
//sys Close(fd int) (err error)
//sys Dup(fd int) (nfd int, err error)
//sys Dup2(from int, to int) (err error)
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_hurd.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_hurd.go
new file mode 100644
index 000000000000..4ffb64808d75
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_hurd.go
@@ -0,0 +1,22 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build hurd
+// +build hurd
+
+package unix
+
+/*
+#include <stdint.h>
+int ioctl(int, unsigned long int, uintptr_t);
+*/
+import "C"
+
+func ioctl(fd int, req uint, arg uintptr) (err error) {
+ r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg))
+ if r0 == -1 && er != nil {
+ err = er
+ }
+ return
+}
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_hurd_386.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_hurd_386.go
new file mode 100644
index 000000000000..7cf54a3e4f10
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_hurd_386.go
@@ -0,0 +1,29 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build 386 && hurd
+// +build 386,hurd
+
+package unix
+
+const (
+ TIOCGETA = 0x62251713
+)
+
+type Winsize struct {
+ Row uint16
+ Col uint16
+ Xpixel uint16
+ Ypixel uint16
+}
+
+type Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]uint8
+ Ispeed int32
+ Ospeed int32
+}
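syscall_hurd.go routes ioctl through cgo because GNU/Hurd exposes no stable syscall numbers, and syscall_hurd_386.go adds just enough surface (TIOCGETA, Termios, Winsize) for terminal handling, e.g. by golang.org/x/term. A minimal sketch of how this is reached through the generic helpers in ioctl.go (whose build tags gain hurd above), assuming a hurd build:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// IoctlGetTermios bottoms out in the cgo-backed ioctl defined above;
	// it succeeds only when the descriptor refers to a terminal.
	_, err := unix.IoctlGetTermios(int(os.Stdin.Fd()), unix.TIOCGETA)
	fmt.Println("stdin is a terminal:", err == nil)
}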
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_linux.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_linux.go
index c5a98440eca1..d839962e6633 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -1973,36 +1973,46 @@ func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) {
//sys preadv2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) = SYS_PREADV2
//sys pwritev2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) = SYS_PWRITEV2
-func bytes2iovec(bs [][]byte) []Iovec {
- iovecs := make([]Iovec, len(bs))
- for i, b := range bs {
- iovecs[i].SetLen(len(b))
+// minIovec is the size of the small initial allocation used by
+// Readv, Writev, etc.
+//
+// This small allocation gets stack allocated, which lets the
+// common use case of len(iovs) <= minIovec avoid more expensive
+// heap allocations.
+const minIovec = 8
+
+// appendBytes converts bs to Iovecs and appends them to vecs.
+func appendBytes(vecs []Iovec, bs [][]byte) []Iovec {
+ for _, b := range bs {
+ var v Iovec
+ v.SetLen(len(b))
if len(b) > 0 {
- iovecs[i].Base = &b[0]
+ v.Base = &b[0]
} else {
- iovecs[i].Base = (*byte)(unsafe.Pointer(&_zero))
+ v.Base = (*byte)(unsafe.Pointer(&_zero))
}
+ vecs = append(vecs, v)
}
- return iovecs
+ return vecs
}
-// offs2lohi splits offs into its lower and upper unsigned long. On 64-bit
-// systems, hi will always be 0. On 32-bit systems, offs will be split in half.
-// preadv/pwritev chose this calling convention so they don't need to add a
-// padding-register for alignment on ARM.
+// offs2lohi splits offs into its low and high order bits.
func offs2lohi(offs int64) (lo, hi uintptr) {
- return uintptr(offs), uintptr(uint64(offs) >> SizeofLong)
+ const longBits = SizeofLong * 8
+ return uintptr(offs), uintptr(uint64(offs) >> longBits)
}
func Readv(fd int, iovs [][]byte) (n int, err error) {
- iovecs := bytes2iovec(iovs)
+ iovecs := make([]Iovec, 0, minIovec)
+ iovecs = appendBytes(iovecs, iovs)
n, err = readv(fd, iovecs)
readvRacedetect(iovecs, n, err)
return n, err
}
func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) {
- iovecs := bytes2iovec(iovs)
+ iovecs := make([]Iovec, 0, minIovec)
+ iovecs = appendBytes(iovecs, iovs)
lo, hi := offs2lohi(offset)
n, err = preadv(fd, iovecs, lo, hi)
readvRacedetect(iovecs, n, err)
@@ -2010,7 +2020,8 @@ func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) {
}
func Preadv2(fd int, iovs [][]byte, offset int64, flags int) (n int, err error) {
- iovecs := bytes2iovec(iovs)
+ iovecs := make([]Iovec, 0, minIovec)
+ iovecs = appendBytes(iovecs, iovs)
lo, hi := offs2lohi(offset)
n, err = preadv2(fd, iovecs, lo, hi, flags)
readvRacedetect(iovecs, n, err)
@@ -2037,7 +2048,8 @@ func readvRacedetect(iovecs []Iovec, n int, err error) {
}
func Writev(fd int, iovs [][]byte) (n int, err error) {
- iovecs := bytes2iovec(iovs)
+ iovecs := make([]Iovec, 0, minIovec)
+ iovecs = appendBytes(iovecs, iovs)
if raceenabled {
raceReleaseMerge(unsafe.Pointer(&ioSync))
}
@@ -2047,7 +2059,8 @@ func Writev(fd int, iovs [][]byte) (n int, err error) {
}
func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) {
- iovecs := bytes2iovec(iovs)
+ iovecs := make([]Iovec, 0, minIovec)
+ iovecs = appendBytes(iovecs, iovs)
if raceenabled {
raceReleaseMerge(unsafe.Pointer(&ioSync))
}
@@ -2058,7 +2071,8 @@ func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) {
}
func Pwritev2(fd int, iovs [][]byte, offset int64, flags int) (n int, err error) {
- iovecs := bytes2iovec(iovs)
+ iovecs := make([]Iovec, 0, minIovec)
+ iovecs = appendBytes(iovecs, iovs)
if raceenabled {
raceReleaseMerge(unsafe.Pointer(&ioSync))
}
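The rewrite above replaces bytes2iovec with appendBytes so each caller can start from make([]Iovec, 0, minIovec): per the new comment, that small slice is stack allocated, and only batches larger than eight buffers spill to the heap. The same hunk also fixes offs2lohi, which previously shifted by SizeofLong (the byte size, 8) instead of the long's bit width. Calling conventions are unchanged; a minimal Readv example on Linux:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/proc/self/cmdline", unix.O_RDONLY, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	// Two buffers; with len(iovs) <= 8 the Iovec slice built by
	// appendBytes fits the initial minIovec capacity.
	bufs := [][]byte{make([]byte, 16), make([]byte, 16)}
	n, err := unix.Readv(fd, bufs)
	fmt.Println(n, err)
}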
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_netbsd.go
index 666f0a1b33d2..35a3ad758f59 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_netbsd.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_netbsd.go
@@ -110,6 +110,20 @@ func direntNamlen(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen))
}
+func SysctlUvmexp(name string) (*Uvmexp, error) {
+ mib, err := sysctlmib(name)
+ if err != nil {
+ return nil, err
+ }
+
+ n := uintptr(SizeofUvmexp)
+ var u Uvmexp
+ if err := sysctl(mib, (*byte)(unsafe.Pointer(&u)), &n, nil, 0); err != nil {
+ return nil, err
+ }
+ return &u, nil
+}
+
func Pipe(p []int) (err error) {
return Pipe2(p, 0)
}
@@ -245,6 +259,7 @@ func Statvfs(path string, buf *Statvfs_t) (err error) {
//sys Chmod(path string, mode uint32) (err error)
//sys Chown(path string, uid int, gid int) (err error)
//sys Chroot(path string) (err error)
+//sys ClockGettime(clockid int32, time *Timespec) (err error)
//sys Close(fd int) (err error)
//sys Dup(fd int) (nfd int, err error)
//sys Dup2(from int, to int) (err error)
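SysctlUvmexp gives NetBSD callers typed access to the kernel's UVM counters (the data vmstat reports) without hand-decoding the raw sysctl buffer. A sketch, assuming a NetBSD build; Npages and Free are fields of the generated Uvmexp struct:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	u, err := unix.SysctlUvmexp("vm.uvmexp")
	if err != nil {
		panic(err)
	}
	fmt.Printf("pages: %d total, %d free\n", u.Npages, u.Free)
}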
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_openbsd.go
index 78daceb338bc..9b67b908e5f9 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_openbsd.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_openbsd.go
@@ -220,6 +220,7 @@ func Uname(uname *Utsname) error {
//sys Chmod(path string, mode uint32) (err error)
//sys Chown(path string, uid int, gid int) (err error)
//sys Chroot(path string) (err error)
+//sys ClockGettime(clockid int32, time *Timespec) (err error)
//sys Close(fd int) (err error)
//sys Dup(fd int) (nfd int, err error)
//sys Dup2(from int, to int) (err error)
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go
index e23c5394eff3..04aa43f41b25 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build openbsd && !mips64
-// +build openbsd,!mips64
+//go:build openbsd
+// +build openbsd
package unix
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_solaris.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_solaris.go
index 2109e569ccef..07ac56109a05 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_solaris.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_solaris.go
@@ -590,6 +590,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
//sys Chmod(path string, mode uint32) (err error)
//sys Chown(path string, uid int, gid int) (err error)
//sys Chroot(path string) (err error)
+//sys ClockGettime(clockid int32, time *Timespec) (err error)
//sys Close(fd int) (err error)
//sys Creat(path string, mode uint32) (fd int, err error)
//sys Dup(fd int) (nfd int, err error)
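This change adds the same ClockGettime wrapper to dragonfly, freebsd, netbsd, openbsd, and solaris, giving direct access to clock_gettime(2) clocks. A minimal sketch, portable across those platforms:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var ts unix.Timespec
	// CLOCK_MONOTONIC is defined in the zerrors file of every
	// platform gaining the wrapper here.
	if err := unix.ClockGettime(unix.CLOCK_MONOTONIC, &ts); err != nil {
		panic(err)
	}
	fmt.Printf("monotonic: %d.%09d s\n", ts.Sec, ts.Nsec)
}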
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_unix.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_unix.go
index 00bafda86545..a386f8897df3 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_unix.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_unix.go
@@ -331,6 +331,19 @@ func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) {
return
}
+// Recvmsg receives a message from a socket using the recvmsg system call. The
+// received non-control data will be written to p, and any "out of band"
+// control data will be written to oob. The flags are passed to recvmsg.
+//
+// The results are:
+// - n is the number of non-control data bytes read into p
+// - oobn is the number of control data bytes read into oob; this may be interpreted using [ParseSocketControlMessage]
+// - recvflags is flags returned by recvmsg
+// - from is the address of the sender
+//
+// If the underlying socket type is not SOCK_DGRAM, a received message
+// containing oob data and a single '\0' of non-control data is treated as if
+// the message contained only control data, i.e. n will be zero on return.
func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) {
var iov [1]Iovec
if len(p) > 0 {
@@ -346,13 +359,9 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from
return
}
-// RecvmsgBuffers receives a message from a socket using the recvmsg
-// system call. The flags are passed to recvmsg. Any non-control data
-// read is scattered into the buffers slices. The results are:
-// - n is the number of non-control data read into bufs
-// - oobn is the number of control data read into oob; this may be interpreted using [ParseSocketControlMessage]
-// - recvflags is flags returned by recvmsg
-// - from is the address of the sender
+// RecvmsgBuffers receives a message from a socket using the recvmsg system
+// call. This function is equivalent to Recvmsg, but non-control data read is
+// scattered into the buffers slices.
func RecvmsgBuffers(fd int, buffers [][]byte, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) {
iov := make([]Iovec, len(buffers))
for i := range buffers {
@@ -371,11 +380,38 @@ func RecvmsgBuffers(fd int, buffers [][]byte, oob []byte, flags int) (n, oobn in
return
}
+// Sendmsg sends a message on a socket to an address using the sendmsg system
+// call. This function is equivalent to SendmsgN, but does not return the
+// number of bytes actually sent.
func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) {
_, err = SendmsgN(fd, p, oob, to, flags)
return
}
+// SendmsgN sends a message on a socket to an address using the sendmsg system
+// call. p contains the non-control data to send, and oob contains the "out of
+// band" control data. The flags are passed to sendmsg. The number of
+// non-control bytes actually written to the socket is returned.
+//
+// Some socket types do not support sending control data without accompanying
+// non-control data. If p is empty, and oob contains control data, and the
+// underlying socket type is not SOCK_DGRAM, p will be treated as containing a
+// single '\0' and the return value will indicate zero bytes sent.
+//
+// The Go function Recvmsg, if called with an empty p and a non-empty oob,
+// will read and ignore this additional '\0'. If the message is received by
+// code that does not use Recvmsg, or that does not use Go at all, that code
+// will need to be written to expect and ignore the additional '\0'.
+//
+// If you need to send non-empty oob with p actually empty, and if the
+// underlying socket type supports it, you can do so via a raw system call as
+// follows:
+//
+// msg := &unix.Msghdr{
+// Control: &oob[0],
+// }
+// msg.SetControllen(len(oob))
+// n, _, errno := unix.Syscall(unix.SYS_SENDMSG, uintptr(fd), uintptr(unsafe.Pointer(msg)), flags)
func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) {
var iov [1]Iovec
if len(p) > 0 {
@@ -394,9 +430,8 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error)
}
// SendmsgBuffers sends a message on a socket to an address using the sendmsg
-// system call. The flags are passed to sendmsg. Any non-control data written
-// is gathered from buffers. The function returns the number of bytes written
-// to the socket.
+// system call. This function is equivalent to SendmsgN, but the non-control
+// data is gathered from buffers.
func SendmsgBuffers(fd int, buffers [][]byte, oob []byte, to Sockaddr, flags int) (n int, err error) {
iov := make([]Iovec, len(buffers))
for i := range buffers {
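The expanded doc comments above pin down the '\0' placeholder rule for control-only messages on stream sockets. A common use of this API is passing a file descriptor across a socketpair; sending one real payload byte sidesteps the placeholder behavior entirely. A sketch using only calls documented in this package:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}

	// Send stdout's descriptor as SCM_RIGHTS plus one data byte.
	oob := unix.UnixRights(int(os.Stdout.Fd()))
	if _, err := unix.SendmsgN(fds[0], []byte{'x'}, oob, nil, 0); err != nil {
		panic(err)
	}

	// Receive and decode the control message on the other end.
	buf := make([]byte, 1)
	oobBuf := make([]byte, unix.CmsgSpace(4)) // room for one 4-byte fd
	_, oobn, _, _, err := unix.Recvmsg(fds[1], buf, oobBuf, 0)
	if err != nil {
		panic(err)
	}
	msgs, err := unix.ParseSocketControlMessage(oobBuf[:oobn])
	if err != nil {
		panic(err)
	}
	got, err := unix.ParseUnixRights(&msgs[0])
	if err != nil {
		panic(err)
	}
	fmt.Println("received fd:", got[0])
}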
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go
index 6d56edc05ac3..af20e474b388 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go
@@ -46,6 +46,7 @@ const (
AF_SNA = 0xb
AF_UNIX = 0x1
AF_UNSPEC = 0x0
+ ALTWERASE = 0x200
ARPHRD_ETHER = 0x1
ARPHRD_FRELAY = 0xf
ARPHRD_IEEE1394 = 0x18
@@ -108,6 +109,15 @@ const (
BPF_DIRECTION_IN = 0x1
BPF_DIRECTION_OUT = 0x2
BPF_DIV = 0x30
+ BPF_FILDROP_CAPTURE = 0x1
+ BPF_FILDROP_DROP = 0x2
+ BPF_FILDROP_PASS = 0x0
+ BPF_F_DIR_IN = 0x10
+ BPF_F_DIR_MASK = 0x30
+ BPF_F_DIR_OUT = 0x20
+ BPF_F_DIR_SHIFT = 0x4
+ BPF_F_FLOWID = 0x8
+ BPF_F_PRI_MASK = 0x7
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
@@ -136,6 +146,7 @@ const (
BPF_OR = 0x40
BPF_RELEASE = 0x30bb6
BPF_RET = 0x6
+ BPF_RND = 0xc0
BPF_RSH = 0x70
BPF_ST = 0x2
BPF_STX = 0x3
@@ -147,6 +158,12 @@ const (
BRKINT = 0x2
CFLUSH = 0xf
CLOCAL = 0x8000
+ CLOCK_BOOTTIME = 0x6
+ CLOCK_MONOTONIC = 0x3
+ CLOCK_PROCESS_CPUTIME_ID = 0x2
+ CLOCK_REALTIME = 0x0
+ CLOCK_THREAD_CPUTIME_ID = 0x4
+ CLOCK_UPTIME = 0x5
CPUSTATES = 0x6
CP_IDLE = 0x5
CP_INTR = 0x4
@@ -170,7 +187,65 @@ const (
CTL_KERN = 0x1
CTL_MAXNAME = 0xc
CTL_NET = 0x4
+ DIOCADDQUEUE = 0xc100445d
+ DIOCADDRULE = 0xccc84404
+ DIOCADDSTATE = 0xc1084425
+ DIOCCHANGERULE = 0xccc8441a
+ DIOCCLRIFFLAG = 0xc024445a
+ DIOCCLRSRCNODES = 0x20004455
+ DIOCCLRSTATES = 0xc0d04412
+ DIOCCLRSTATUS = 0xc0244416
+ DIOCGETLIMIT = 0xc0084427
+ DIOCGETQSTATS = 0xc1084460
+ DIOCGETQUEUE = 0xc100445f
+ DIOCGETQUEUES = 0xc100445e
+ DIOCGETRULE = 0xccc84407
+ DIOCGETRULES = 0xccc84406
+ DIOCGETRULESET = 0xc444443b
+ DIOCGETRULESETS = 0xc444443a
+ DIOCGETSRCNODES = 0xc0084454
+ DIOCGETSTATE = 0xc1084413
+ DIOCGETSTATES = 0xc0084419
+ DIOCGETSTATUS = 0xc1e84415
+ DIOCGETSYNFLWATS = 0xc0084463
+ DIOCGETTIMEOUT = 0xc008441e
+ DIOCIGETIFACES = 0xc0244457
+ DIOCKILLSRCNODES = 0xc068445b
+ DIOCKILLSTATES = 0xc0d04429
+ DIOCNATLOOK = 0xc0504417
+ DIOCOSFPADD = 0xc084444f
DIOCOSFPFLUSH = 0x2000444e
+ DIOCOSFPGET = 0xc0844450
+ DIOCRADDADDRS = 0xc44c4443
+ DIOCRADDTABLES = 0xc44c443d
+ DIOCRCLRADDRS = 0xc44c4442
+ DIOCRCLRASTATS = 0xc44c4448
+ DIOCRCLRTABLES = 0xc44c443c
+ DIOCRCLRTSTATS = 0xc44c4441
+ DIOCRDELADDRS = 0xc44c4444
+ DIOCRDELTABLES = 0xc44c443e
+ DIOCRGETADDRS = 0xc44c4446
+ DIOCRGETASTATS = 0xc44c4447
+ DIOCRGETTABLES = 0xc44c443f
+ DIOCRGETTSTATS = 0xc44c4440
+ DIOCRINADEFINE = 0xc44c444d
+ DIOCRSETADDRS = 0xc44c4445
+ DIOCRSETTFLAGS = 0xc44c444a
+ DIOCRTSTADDRS = 0xc44c4449
+ DIOCSETDEBUG = 0xc0044418
+ DIOCSETHOSTID = 0xc0044456
+ DIOCSETIFFLAG = 0xc0244459
+ DIOCSETLIMIT = 0xc0084428
+ DIOCSETREASS = 0xc004445c
+ DIOCSETSTATUSIF = 0xc0244414
+ DIOCSETSYNCOOKIES = 0xc0014462
+ DIOCSETSYNFLWATS = 0xc0084461
+ DIOCSETTIMEOUT = 0xc008441d
+ DIOCSTART = 0x20004401
+ DIOCSTOP = 0x20004402
+ DIOCXBEGIN = 0xc00c4451
+ DIOCXCOMMIT = 0xc00c4452
+ DIOCXROLLBACK = 0xc00c4453
DLT_ARCNET = 0x7
DLT_ATM_RFC1483 = 0xb
DLT_AX25 = 0x3
@@ -186,6 +261,7 @@ const (
DLT_LOOP = 0xc
DLT_MPLS = 0xdb
DLT_NULL = 0x0
+ DLT_OPENFLOW = 0x10b
DLT_PFLOG = 0x75
DLT_PFSYNC = 0x12
DLT_PPP = 0x9
@@ -196,6 +272,23 @@ const (
DLT_RAW = 0xe
DLT_SLIP = 0x8
DLT_SLIP_BSDOS = 0xf
+ DLT_USBPCAP = 0xf9
+ DLT_USER0 = 0x93
+ DLT_USER1 = 0x94
+ DLT_USER10 = 0x9d
+ DLT_USER11 = 0x9e
+ DLT_USER12 = 0x9f
+ DLT_USER13 = 0xa0
+ DLT_USER14 = 0xa1
+ DLT_USER15 = 0xa2
+ DLT_USER2 = 0x95
+ DLT_USER3 = 0x96
+ DLT_USER4 = 0x97
+ DLT_USER5 = 0x98
+ DLT_USER6 = 0x99
+ DLT_USER7 = 0x9a
+ DLT_USER8 = 0x9b
+ DLT_USER9 = 0x9c
DT_BLK = 0x6
DT_CHR = 0x2
DT_DIR = 0x4
@@ -215,6 +308,8 @@ const (
EMUL_ENABLED = 0x1
EMUL_NATIVE = 0x2
ENDRUNDISC = 0x9
+ ETH64_8021_RSVD_MASK = 0xfffffffffff0
+ ETH64_8021_RSVD_PREFIX = 0x180c2000000
ETHERMIN = 0x2e
ETHERMTU = 0x5dc
ETHERTYPE_8023 = 0x4
@@ -267,6 +362,7 @@ const (
ETHERTYPE_DN = 0x6003
ETHERTYPE_DOGFIGHT = 0x1989
ETHERTYPE_DSMD = 0x8039
+ ETHERTYPE_EAPOL = 0x888e
ETHERTYPE_ECMA = 0x803
ETHERTYPE_ENCRYPT = 0x803d
ETHERTYPE_ES = 0x805d
@@ -298,6 +394,7 @@ const (
ETHERTYPE_LLDP = 0x88cc
ETHERTYPE_LOGICRAFT = 0x8148
ETHERTYPE_LOOPBACK = 0x9000
+ ETHERTYPE_MACSEC = 0x88e5
ETHERTYPE_MATRA = 0x807a
ETHERTYPE_MAX = 0xffff
ETHERTYPE_MERIT = 0x807c
@@ -326,15 +423,17 @@ const (
ETHERTYPE_NCD = 0x8149
ETHERTYPE_NESTAR = 0x8006
ETHERTYPE_NETBEUI = 0x8191
+ ETHERTYPE_NHRP = 0x2001
ETHERTYPE_NOVELL = 0x8138
ETHERTYPE_NS = 0x600
ETHERTYPE_NSAT = 0x601
ETHERTYPE_NSCOMPAT = 0x807
+ ETHERTYPE_NSH = 0x984f
ETHERTYPE_NTRAILER = 0x10
ETHERTYPE_OS9 = 0x7007
ETHERTYPE_OS9NET = 0x7009
ETHERTYPE_PACER = 0x80c6
- ETHERTYPE_PAE = 0x888e
+ ETHERTYPE_PBB = 0x88e7
ETHERTYPE_PCS = 0x4242
ETHERTYPE_PLANNING = 0x8044
ETHERTYPE_PPP = 0x880b
@@ -409,28 +508,40 @@ const (
ETHER_CRC_POLY_LE = 0xedb88320
ETHER_HDR_LEN = 0xe
ETHER_MAX_DIX_LEN = 0x600
+ ETHER_MAX_HARDMTU_LEN = 0xff9b
ETHER_MAX_LEN = 0x5ee
ETHER_MIN_LEN = 0x40
ETHER_TYPE_LEN = 0x2
ETHER_VLAN_ENCAP_LEN = 0x4
EVFILT_AIO = -0x3
+ EVFILT_DEVICE = -0x8
+ EVFILT_EXCEPT = -0x9
EVFILT_PROC = -0x5
EVFILT_READ = -0x1
EVFILT_SIGNAL = -0x6
- EVFILT_SYSCOUNT = 0x7
+ EVFILT_SYSCOUNT = 0x9
EVFILT_TIMER = -0x7
EVFILT_VNODE = -0x4
EVFILT_WRITE = -0x2
+ EVL_ENCAPLEN = 0x4
+ EVL_PRIO_BITS = 0xd
+ EVL_PRIO_MAX = 0x7
+ EVL_VLID_MASK = 0xfff
+ EVL_VLID_MAX = 0xffe
+ EVL_VLID_MIN = 0x1
+ EVL_VLID_NULL = 0x0
EV_ADD = 0x1
EV_CLEAR = 0x20
EV_DELETE = 0x2
EV_DISABLE = 0x8
+ EV_DISPATCH = 0x80
EV_ENABLE = 0x4
EV_EOF = 0x8000
EV_ERROR = 0x4000
EV_FLAG1 = 0x2000
EV_ONESHOT = 0x10
- EV_SYSFLAGS = 0xf000
+ EV_RECEIPT = 0x40
+ EV_SYSFLAGS = 0xf800
EXTA = 0x4b00
EXTB = 0x9600
EXTPROC = 0x800
@@ -443,6 +554,7 @@ const (
F_GETFL = 0x3
F_GETLK = 0x7
F_GETOWN = 0x5
+ F_ISATTY = 0xb
F_OK = 0x0
F_RDLCK = 0x1
F_SETFD = 0x2
@@ -460,7 +572,6 @@ const (
IEXTEN = 0x400
IFAN_ARRIVAL = 0x0
IFAN_DEPARTURE = 0x1
- IFA_ROUTE = 0x1
IFF_ALLMULTI = 0x200
IFF_BROADCAST = 0x2
IFF_CANTCHANGE = 0x8e52
@@ -471,12 +582,12 @@ const (
IFF_LOOPBACK = 0x8
IFF_MULTICAST = 0x8000
IFF_NOARP = 0x80
- IFF_NOTRAILERS = 0x20
IFF_OACTIVE = 0x400
IFF_POINTOPOINT = 0x10
IFF_PROMISC = 0x100
IFF_RUNNING = 0x40
IFF_SIMPLEX = 0x800
+ IFF_STATICARP = 0x20
IFF_UP = 0x1
IFNAMSIZ = 0x10
IFT_1822 = 0x2
@@ -605,6 +716,7 @@ const (
IFT_LINEGROUP = 0xd2
IFT_LOCALTALK = 0x2a
IFT_LOOP = 0x18
+ IFT_MBIM = 0xfa
IFT_MEDIAMAILOVERIP = 0x8b
IFT_MFSIGLINK = 0xa7
IFT_MIOX25 = 0x26
@@ -695,6 +807,7 @@ const (
IFT_VOICEOVERCABLE = 0xc6
IFT_VOICEOVERFRAMERELAY = 0x99
IFT_VOICEOVERIP = 0x68
+ IFT_WIREGUARD = 0xfb
IFT_X213 = 0x5d
IFT_X25 = 0x5
IFT_X25DDN = 0x4
@@ -729,8 +842,6 @@ const (
IPPROTO_AH = 0x33
IPPROTO_CARP = 0x70
IPPROTO_DIVERT = 0x102
- IPPROTO_DIVERT_INIT = 0x2
- IPPROTO_DIVERT_RESP = 0x1
IPPROTO_DONE = 0x101
IPPROTO_DSTOPTS = 0x3c
IPPROTO_EGP = 0x8
@@ -762,9 +873,11 @@ const (
IPPROTO_RAW = 0xff
IPPROTO_ROUTING = 0x2b
IPPROTO_RSVP = 0x2e
+ IPPROTO_SCTP = 0x84
IPPROTO_TCP = 0x6
IPPROTO_TP = 0x1d
IPPROTO_UDP = 0x11
+ IPPROTO_UDPLITE = 0x88
IPV6_AUTH_LEVEL = 0x35
IPV6_AUTOFLOWLABEL = 0x3b
IPV6_CHECKSUM = 0x1a
@@ -787,6 +900,7 @@ const (
IPV6_LEAVE_GROUP = 0xd
IPV6_MAXHLIM = 0xff
IPV6_MAXPACKET = 0xffff
+ IPV6_MINHOPCOUNT = 0x41
IPV6_MMTU = 0x500
IPV6_MULTICAST_HOPS = 0xa
IPV6_MULTICAST_IF = 0x9
@@ -826,12 +940,12 @@ const (
IP_DEFAULT_MULTICAST_LOOP = 0x1
IP_DEFAULT_MULTICAST_TTL = 0x1
IP_DF = 0x4000
- IP_DIVERTFL = 0x1022
IP_DROP_MEMBERSHIP = 0xd
IP_ESP_NETWORK_LEVEL = 0x16
IP_ESP_TRANS_LEVEL = 0x15
IP_HDRINCL = 0x2
IP_IPCOMP_LEVEL = 0x1d
+ IP_IPDEFTTL = 0x25
IP_IPSECFLOWINFO = 0x24
IP_IPSEC_LOCAL_AUTH = 0x1b
IP_IPSEC_LOCAL_CRED = 0x19
@@ -865,10 +979,15 @@ const (
IP_RETOPTS = 0x8
IP_RF = 0x8000
IP_RTABLE = 0x1021
+ IP_SENDSRCADDR = 0x7
IP_TOS = 0x3
IP_TTL = 0x4
ISIG = 0x80
ISTRIP = 0x20
+ ITIMER_PROF = 0x2
+ ITIMER_REAL = 0x0
+ ITIMER_VIRTUAL = 0x1
+ IUCLC = 0x1000
IXANY = 0x800
IXOFF = 0x400
IXON = 0x200
@@ -900,10 +1019,11 @@ const (
MAP_INHERIT_COPY = 0x1
MAP_INHERIT_NONE = 0x2
MAP_INHERIT_SHARE = 0x0
- MAP_NOEXTEND = 0x100
- MAP_NORESERVE = 0x40
+ MAP_INHERIT_ZERO = 0x3
+ MAP_NOEXTEND = 0x0
+ MAP_NORESERVE = 0x0
MAP_PRIVATE = 0x2
- MAP_RENAME = 0x20
+ MAP_RENAME = 0x0
MAP_SHARED = 0x1
MAP_STACK = 0x4000
MAP_TRYFIXED = 0x0
@@ -922,6 +1042,7 @@ const (
MNT_NOATIME = 0x8000
MNT_NODEV = 0x10
MNT_NOEXEC = 0x4
+ MNT_NOPERM = 0x20
MNT_NOSUID = 0x8
MNT_NOWAIT = 0x2
MNT_QUOTA = 0x2000
@@ -929,13 +1050,29 @@ const (
MNT_RELOAD = 0x40000
MNT_ROOTFS = 0x4000
MNT_SOFTDEP = 0x4000000
+ MNT_STALLED = 0x100000
+ MNT_SWAPPABLE = 0x200000
MNT_SYNCHRONOUS = 0x2
MNT_UPDATE = 0x10000
MNT_VISFLAGMASK = 0x400ffff
MNT_WAIT = 0x1
MNT_WANTRDWR = 0x2000000
MNT_WXALLOWED = 0x800
+ MOUNT_AFS = "afs"
+ MOUNT_CD9660 = "cd9660"
+ MOUNT_EXT2FS = "ext2fs"
+ MOUNT_FFS = "ffs"
+ MOUNT_FUSEFS = "fuse"
+ MOUNT_MFS = "mfs"
+ MOUNT_MSDOS = "msdos"
+ MOUNT_NCPFS = "ncpfs"
+ MOUNT_NFS = "nfs"
+ MOUNT_NTFS = "ntfs"
+ MOUNT_TMPFS = "tmpfs"
+ MOUNT_UDF = "udf"
+ MOUNT_UFS = "ffs"
MSG_BCAST = 0x100
+ MSG_CMSG_CLOEXEC = 0x800
MSG_CTRUNC = 0x20
MSG_DONTROUTE = 0x4
MSG_DONTWAIT = 0x80
@@ -946,6 +1083,7 @@ const (
MSG_PEEK = 0x2
MSG_TRUNC = 0x10
MSG_WAITALL = 0x40
+ MSG_WAITFORONE = 0x1000
MS_ASYNC = 0x1
MS_INVALIDATE = 0x4
MS_SYNC = 0x2
@@ -953,12 +1091,16 @@ const (
NET_RT_DUMP = 0x1
NET_RT_FLAGS = 0x2
NET_RT_IFLIST = 0x3
- NET_RT_MAXID = 0x6
+ NET_RT_IFNAMES = 0x6
+ NET_RT_MAXID = 0x8
+ NET_RT_SOURCE = 0x7
NET_RT_STATS = 0x4
NET_RT_TABLE = 0x5
NFDBITS = 0x20
NOFLSH = 0x80000000
+ NOKERNINFO = 0x2000000
NOTE_ATTRIB = 0x8
+ NOTE_CHANGE = 0x1
NOTE_CHILD = 0x4
NOTE_DELETE = 0x1
NOTE_EOF = 0x2
@@ -968,6 +1110,7 @@ const (
NOTE_FORK = 0x40000000
NOTE_LINK = 0x10
NOTE_LOWAT = 0x1
+ NOTE_OOB = 0x4
NOTE_PCTRLMASK = 0xf0000000
NOTE_PDATAMASK = 0xfffff
NOTE_RENAME = 0x20
@@ -977,11 +1120,13 @@ const (
NOTE_TRUNCATE = 0x80
NOTE_WRITE = 0x2
OCRNL = 0x10
+ OLCUC = 0x20
ONLCR = 0x2
ONLRET = 0x80
ONOCR = 0x40
ONOEOT = 0x8
OPOST = 0x1
+ OXTABS = 0x4
O_ACCMODE = 0x3
O_APPEND = 0x8
O_ASYNC = 0x40
@@ -1015,7 +1160,6 @@ const (
PROT_NONE = 0x0
PROT_READ = 0x1
PROT_WRITE = 0x2
- PT_MASK = 0x3ff000
RLIMIT_CORE = 0x4
RLIMIT_CPU = 0x0
RLIMIT_DATA = 0x2
@@ -1027,19 +1171,25 @@ const (
RLIMIT_STACK = 0x3
RLIM_INFINITY = 0x7fffffffffffffff
RTAX_AUTHOR = 0x6
+ RTAX_BFD = 0xb
RTAX_BRD = 0x7
+ RTAX_DNS = 0xc
RTAX_DST = 0x0
RTAX_GATEWAY = 0x1
RTAX_GENMASK = 0x3
RTAX_IFA = 0x5
RTAX_IFP = 0x4
RTAX_LABEL = 0xa
- RTAX_MAX = 0xb
+ RTAX_MAX = 0xf
RTAX_NETMASK = 0x2
+ RTAX_SEARCH = 0xe
RTAX_SRC = 0x8
RTAX_SRCMASK = 0x9
+ RTAX_STATIC = 0xd
RTA_AUTHOR = 0x40
+ RTA_BFD = 0x800
RTA_BRD = 0x80
+ RTA_DNS = 0x1000
RTA_DST = 0x1
RTA_GATEWAY = 0x2
RTA_GENMASK = 0x8
@@ -1047,49 +1197,57 @@ const (
RTA_IFP = 0x10
RTA_LABEL = 0x400
RTA_NETMASK = 0x4
+ RTA_SEARCH = 0x4000
RTA_SRC = 0x100
RTA_SRCMASK = 0x200
+ RTA_STATIC = 0x2000
RTF_ANNOUNCE = 0x4000
+ RTF_BFD = 0x1000000
RTF_BLACKHOLE = 0x1000
+ RTF_BROADCAST = 0x400000
+ RTF_CACHED = 0x20000
RTF_CLONED = 0x10000
RTF_CLONING = 0x100
+ RTF_CONNECTED = 0x800000
RTF_DONE = 0x40
RTF_DYNAMIC = 0x10
- RTF_FMASK = 0x10f808
+ RTF_FMASK = 0x110fc08
RTF_GATEWAY = 0x2
RTF_HOST = 0x4
RTF_LLINFO = 0x400
- RTF_MASK = 0x80
+ RTF_LOCAL = 0x200000
RTF_MODIFIED = 0x20
RTF_MPATH = 0x40000
RTF_MPLS = 0x100000
+ RTF_MULTICAST = 0x200
RTF_PERMANENT_ARP = 0x2000
RTF_PROTO1 = 0x8000
RTF_PROTO2 = 0x4000
RTF_PROTO3 = 0x2000
RTF_REJECT = 0x8
- RTF_SOURCE = 0x20000
RTF_STATIC = 0x800
- RTF_TUNNEL = 0x100000
RTF_UP = 0x1
RTF_USETRAILERS = 0x8000
- RTF_XRESOLVE = 0x200
+ RTM_80211INFO = 0x15
RTM_ADD = 0x1
+ RTM_BFD = 0x12
RTM_CHANGE = 0x3
+ RTM_CHGADDRATTR = 0x14
RTM_DELADDR = 0xd
RTM_DELETE = 0x2
RTM_DESYNC = 0x10
RTM_GET = 0x4
RTM_IFANNOUNCE = 0xf
RTM_IFINFO = 0xe
- RTM_LOCK = 0x8
+ RTM_INVALIDATE = 0x11
RTM_LOSING = 0x5
RTM_MAXSIZE = 0x800
RTM_MISS = 0x7
RTM_NEWADDR = 0xc
+ RTM_PROPOSAL = 0x13
RTM_REDIRECT = 0x6
RTM_RESOLVE = 0xb
- RTM_RTTUNIT = 0xf4240
+ RTM_SOURCE = 0x16
RTM_VERSION = 0x5
RTV_EXPIRE = 0x4
RTV_HOPCOUNT = 0x2
@@ -1099,67 +1257,74 @@ const (
RTV_RTTVAR = 0x80
RTV_SPIPE = 0x10
RTV_SSTHRESH = 0x20
+ RT_TABLEID_BITS = 0x8
+ RT_TABLEID_MASK = 0xff
RT_TABLEID_MAX = 0xff
RUSAGE_CHILDREN = -0x1
RUSAGE_SELF = 0x0
RUSAGE_THREAD = 0x1
SCM_RIGHTS = 0x1
SCM_TIMESTAMP = 0x4
+ SEEK_CUR = 0x1
+ SEEK_END = 0x2
+ SEEK_SET = 0x0
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
SIOCADDMULTI = 0x80206931
SIOCAIFADDR = 0x8040691a
SIOCAIFGROUP = 0x80246987
- SIOCALIFADDR = 0x8218691c
SIOCATMARK = 0x40047307
- SIOCBRDGADD = 0x8054693c
- SIOCBRDGADDS = 0x80546941
- SIOCBRDGARL = 0x806e694d
+ SIOCBRDGADD = 0x805c693c
+ SIOCBRDGADDL = 0x805c6949
+ SIOCBRDGADDS = 0x805c6941
+ SIOCBRDGARL = 0x808c694d
SIOCBRDGDADDR = 0x81286947
- SIOCBRDGDEL = 0x8054693d
- SIOCBRDGDELS = 0x80546942
- SIOCBRDGFLUSH = 0x80546948
- SIOCBRDGFRL = 0x806e694e
+ SIOCBRDGDEL = 0x805c693d
+ SIOCBRDGDELS = 0x805c6942
+ SIOCBRDGFLUSH = 0x805c6948
+ SIOCBRDGFRL = 0x808c694e
SIOCBRDGGCACHE = 0xc0146941
SIOCBRDGGFD = 0xc0146952
SIOCBRDGGHT = 0xc0146951
- SIOCBRDGGIFFLGS = 0xc054693e
+ SIOCBRDGGIFFLGS = 0xc05c693e
SIOCBRDGGMA = 0xc0146953
SIOCBRDGGPARAM = 0xc03c6958
SIOCBRDGGPRI = 0xc0146950
SIOCBRDGGRL = 0xc028694f
- SIOCBRDGGSIFS = 0xc054693c
SIOCBRDGGTO = 0xc0146946
- SIOCBRDGIFS = 0xc0546942
+ SIOCBRDGIFS = 0xc05c6942
SIOCBRDGRTS = 0xc0186943
SIOCBRDGSADDR = 0xc1286944
SIOCBRDGSCACHE = 0x80146940
SIOCBRDGSFD = 0x80146952
SIOCBRDGSHT = 0x80146951
- SIOCBRDGSIFCOST = 0x80546955
- SIOCBRDGSIFFLGS = 0x8054693f
- SIOCBRDGSIFPRIO = 0x80546954
+ SIOCBRDGSIFCOST = 0x805c6955
+ SIOCBRDGSIFFLGS = 0x805c693f
+ SIOCBRDGSIFPRIO = 0x805c6954
+ SIOCBRDGSIFPROT = 0x805c694a
SIOCBRDGSMA = 0x80146953
SIOCBRDGSPRI = 0x80146950
SIOCBRDGSPROTO = 0x8014695a
SIOCBRDGSTO = 0x80146945
SIOCBRDGSTXHC = 0x80146959
+ SIOCDELLABEL = 0x80206997
SIOCDELMULTI = 0x80206932
SIOCDIFADDR = 0x80206919
SIOCDIFGROUP = 0x80246989
+ SIOCDIFPARENT = 0x802069b4
SIOCDIFPHYADDR = 0x80206949
- SIOCDLIFADDR = 0x8218691e
+ SIOCDPWE3NEIGHBOR = 0x802069de
+ SIOCDVNETID = 0x802069af
SIOCGETKALIVE = 0xc01869a4
SIOCGETLABEL = 0x8020699a
+ SIOCGETMPWCFG = 0xc02069ae
SIOCGETPFLOW = 0xc02069fe
SIOCGETPFSYNC = 0xc02069f8
SIOCGETSGCNT = 0xc0147534
SIOCGETVIFCNT = 0xc0147533
SIOCGETVLAN = 0xc0206990
- SIOCGHIWAT = 0x40047301
SIOCGIFADDR = 0xc0206921
- SIOCGIFASYNCMAP = 0xc020697c
SIOCGIFBRDADDR = 0xc0206923
SIOCGIFCONF = 0xc0086924
SIOCGIFDATA = 0xc020691b
@@ -1168,40 +1333,53 @@ const (
SIOCGIFFLAGS = 0xc0206911
SIOCGIFGATTR = 0xc024698b
SIOCGIFGENERIC = 0xc020693a
+ SIOCGIFGLIST = 0xc024698d
SIOCGIFGMEMB = 0xc024698a
SIOCGIFGROUP = 0xc0246988
SIOCGIFHARDMTU = 0xc02069a5
- SIOCGIFMEDIA = 0xc0286936
+ SIOCGIFLLPRIO = 0xc02069b6
+ SIOCGIFMEDIA = 0xc0386938
SIOCGIFMETRIC = 0xc0206917
SIOCGIFMTU = 0xc020697e
SIOCGIFNETMASK = 0xc0206925
- SIOCGIFPDSTADDR = 0xc0206948
+ SIOCGIFPAIR = 0xc02069b1
+ SIOCGIFPARENT = 0xc02069b3
SIOCGIFPRIORITY = 0xc020699c
- SIOCGIFPSRCADDR = 0xc0206947
SIOCGIFRDOMAIN = 0xc02069a0
SIOCGIFRTLABEL = 0xc0206983
- SIOCGIFTIMESLOT = 0xc0206986
+ SIOCGIFRXR = 0x802069aa
+ SIOCGIFSFFPAGE = 0xc1126939
SIOCGIFXFLAGS = 0xc020699e
- SIOCGLIFADDR = 0xc218691d
SIOCGLIFPHYADDR = 0xc218694b
+ SIOCGLIFPHYDF = 0xc02069c2
+ SIOCGLIFPHYECN = 0xc02069c8
SIOCGLIFPHYRTABLE = 0xc02069a2
SIOCGLIFPHYTTL = 0xc02069a9
- SIOCGLOWAT = 0x40047303
SIOCGPGRP = 0x40047309
+ SIOCGPWE3 = 0xc0206998
+ SIOCGPWE3CTRLWORD = 0xc02069dc
+ SIOCGPWE3FAT = 0xc02069dd
+ SIOCGPWE3NEIGHBOR = 0xc21869de
+ SIOCGRXHPRIO = 0xc02069db
SIOCGSPPPPARAMS = 0xc0206994
+ SIOCGTXHPRIO = 0xc02069c6
+ SIOCGUMBINFO = 0xc02069be
+ SIOCGUMBPARAM = 0xc02069c0
SIOCGVH = 0xc02069f6
+ SIOCGVNETFLOWID = 0xc02069c4
SIOCGVNETID = 0xc02069a7
+ SIOCIFAFATTACH = 0x801169ab
+ SIOCIFAFDETACH = 0x801169ac
SIOCIFCREATE = 0x8020697a
SIOCIFDESTROY = 0x80206979
SIOCIFGCLONERS = 0xc00c6978
SIOCSETKALIVE = 0x801869a3
SIOCSETLABEL = 0x80206999
+ SIOCSETMPWCFG = 0x802069ad
SIOCSETPFLOW = 0x802069fd
SIOCSETPFSYNC = 0x802069f7
SIOCSETVLAN = 0x8020698f
- SIOCSHIWAT = 0x80047300
SIOCSIFADDR = 0x8020690c
- SIOCSIFASYNCMAP = 0x8020697d
SIOCSIFBRDADDR = 0x80206913
SIOCSIFDESCR = 0x80206980
SIOCSIFDSTADDR = 0x8020690e
@@ -1209,25 +1387,37 @@ const (
SIOCSIFGATTR = 0x8024698c
SIOCSIFGENERIC = 0x80206939
SIOCSIFLLADDR = 0x8020691f
- SIOCSIFMEDIA = 0xc0206935
+ SIOCSIFLLPRIO = 0x802069b5
+ SIOCSIFMEDIA = 0xc0206937
SIOCSIFMETRIC = 0x80206918
SIOCSIFMTU = 0x8020697f
SIOCSIFNETMASK = 0x80206916
- SIOCSIFPHYADDR = 0x80406946
+ SIOCSIFPAIR = 0x802069b0
+ SIOCSIFPARENT = 0x802069b2
SIOCSIFPRIORITY = 0x8020699b
SIOCSIFRDOMAIN = 0x8020699f
SIOCSIFRTLABEL = 0x80206982
- SIOCSIFTIMESLOT = 0x80206985
SIOCSIFXFLAGS = 0x8020699d
SIOCSLIFPHYADDR = 0x8218694a
+ SIOCSLIFPHYDF = 0x802069c1
+ SIOCSLIFPHYECN = 0x802069c7
SIOCSLIFPHYRTABLE = 0x802069a1
SIOCSLIFPHYTTL = 0x802069a8
- SIOCSLOWAT = 0x80047302
SIOCSPGRP = 0x80047308
+ SIOCSPWE3CTRLWORD = 0x802069dc
+ SIOCSPWE3FAT = 0x802069dd
+ SIOCSPWE3NEIGHBOR = 0x821869de
+ SIOCSRXHPRIO = 0x802069db
SIOCSSPPPPARAMS = 0x80206993
+ SIOCSTXHPRIO = 0x802069c5
+ SIOCSUMBPARAM = 0x802069bf
SIOCSVH = 0xc02069f5
+ SIOCSVNETFLOWID = 0x802069c3
SIOCSVNETID = 0x802069a6
+ SOCK_CLOEXEC = 0x8000
SOCK_DGRAM = 0x2
+ SOCK_DNS = 0x1000
+ SOCK_NONBLOCK = 0x4000
SOCK_RAW = 0x3
SOCK_RDM = 0x4
SOCK_SEQPACKET = 0x5
@@ -1238,6 +1428,7 @@ const (
SO_BINDANY = 0x1000
SO_BROADCAST = 0x20
SO_DEBUG = 0x1
+ SO_DOMAIN = 0x1024
SO_DONTROUTE = 0x10
SO_ERROR = 0x1007
SO_KEEPALIVE = 0x8
@@ -1245,6 +1436,7 @@ const (
SO_NETPROC = 0x1020
SO_OOBINLINE = 0x100
SO_PEERCRED = 0x1022
+ SO_PROTOCOL = 0x1025
SO_RCVBUF = 0x1002
SO_RCVLOWAT = 0x1004
SO_RCVTIMEO = 0x1006
@@ -1258,6 +1450,7 @@ const (
SO_TIMESTAMP = 0x800
SO_TYPE = 0x1008
SO_USELOOPBACK = 0x40
+ SO_ZEROIZE = 0x2000
S_BLKSIZE = 0x200
S_IEXEC = 0x40
S_IFBLK = 0x6000
@@ -1287,9 +1480,24 @@ const (
S_IXOTH = 0x1
S_IXUSR = 0x40
TCIFLUSH = 0x1
+ TCIOFF = 0x3
TCIOFLUSH = 0x3
+ TCION = 0x4
TCOFLUSH = 0x2
- TCP_MAXBURST = 0x4
+ TCOOFF = 0x1
+ TCOON = 0x2
+ TCPOPT_EOL = 0x0
+ TCPOPT_MAXSEG = 0x2
+ TCPOPT_NOP = 0x1
+ TCPOPT_SACK = 0x5
+ TCPOPT_SACK_HDR = 0x1010500
+ TCPOPT_SACK_PERMITTED = 0x4
+ TCPOPT_SACK_PERMIT_HDR = 0x1010402
+ TCPOPT_SIGNATURE = 0x13
+ TCPOPT_TIMESTAMP = 0x8
+ TCPOPT_TSTAMP_HDR = 0x101080a
+ TCPOPT_WINDOW = 0x3
+ TCP_INFO = 0x9
TCP_MAXSEG = 0x2
TCP_MAXWIN = 0xffff
TCP_MAX_SACK = 0x3
@@ -1298,11 +1506,15 @@ const (
TCP_MSS = 0x200
TCP_NODELAY = 0x1
TCP_NOPUSH = 0x10
- TCP_NSTATES = 0xb
+ TCP_SACKHOLE_LIMIT = 0x80
TCP_SACK_ENABLE = 0x8
TCSAFLUSH = 0x2
+ TIMER_ABSTIME = 0x1
+ TIMER_RELTIME = 0x0
TIOCCBRK = 0x2000747a
TIOCCDTR = 0x20007478
+ TIOCCHKVERAUTH = 0x2000741e
+ TIOCCLRVERAUTH = 0x2000741d
TIOCCONS = 0x80047462
TIOCDRAIN = 0x2000745e
TIOCEXCL = 0x2000740d
@@ -1357,17 +1569,21 @@ const (
TIOCSETAF = 0x802c7416
TIOCSETAW = 0x802c7415
TIOCSETD = 0x8004741b
+ TIOCSETVERAUTH = 0x8004741c
TIOCSFLAGS = 0x8004745c
TIOCSIG = 0x8004745f
TIOCSPGRP = 0x80047476
TIOCSTART = 0x2000746e
- TIOCSTAT = 0x80047465
- TIOCSTI = 0x80017472
+ TIOCSTAT = 0x20007465
TIOCSTOP = 0x2000746f
TIOCSTSTAMP = 0x8008745a
TIOCSWINSZ = 0x80087467
TIOCUCNTL = 0x80047466
+ TIOCUCNTL_CBRK = 0x7a
+ TIOCUCNTL_SBRK = 0x7b
TOSTOP = 0x400000
+ UTIME_NOW = -0x2
+ UTIME_OMIT = -0x1
VDISCARD = 0xf
VDSUSP = 0xb
VEOF = 0x0
@@ -1378,6 +1594,19 @@ const (
VKILL = 0x5
VLNEXT = 0xe
VMIN = 0x10
+ VM_ANONMIN = 0x7
+ VM_LOADAVG = 0x2
+ VM_MALLOC_CONF = 0xc
+ VM_MAXID = 0xd
+ VM_MAXSLP = 0xa
+ VM_METER = 0x1
+ VM_NKMEMPAGES = 0x6
+ VM_PSSTRINGS = 0x3
+ VM_SWAPENCRYPT = 0x5
+ VM_USPACE = 0xb
+ VM_UVMEXP = 0x4
+ VM_VNODEMIN = 0x9
+ VM_VTEXTMIN = 0x8
VQUIT = 0x9
VREPRINT = 0x6
VSTART = 0xc
@@ -1390,8 +1619,8 @@ const (
WCONTINUED = 0x8
WCOREFLAG = 0x80
WNOHANG = 0x1
- WSTOPPED = 0x7f
WUNTRACED = 0x2
+ XCASE = 0x1000000
)
// Errors
@@ -1405,6 +1634,7 @@ const (
EALREADY = syscall.Errno(0x25)
EAUTH = syscall.Errno(0x50)
EBADF = syscall.Errno(0x9)
+ EBADMSG = syscall.Errno(0x5c)
EBADRPC = syscall.Errno(0x48)
EBUSY = syscall.Errno(0x10)
ECANCELED = syscall.Errno(0x58)
@@ -1431,7 +1661,7 @@ const (
EIPSEC = syscall.Errno(0x52)
EISCONN = syscall.Errno(0x38)
EISDIR = syscall.Errno(0x15)
- ELAST = syscall.Errno(0x5b)
+ ELAST = syscall.Errno(0x5f)
ELOOP = syscall.Errno(0x3e)
EMEDIUMTYPE = syscall.Errno(0x56)
EMFILE = syscall.Errno(0x18)
@@ -1459,12 +1689,14 @@ const (
ENOTCONN = syscall.Errno(0x39)
ENOTDIR = syscall.Errno(0x14)
ENOTEMPTY = syscall.Errno(0x42)
+ ENOTRECOVERABLE = syscall.Errno(0x5d)
ENOTSOCK = syscall.Errno(0x26)
ENOTSUP = syscall.Errno(0x5b)
ENOTTY = syscall.Errno(0x19)
ENXIO = syscall.Errno(0x6)
EOPNOTSUPP = syscall.Errno(0x2d)
EOVERFLOW = syscall.Errno(0x57)
+ EOWNERDEAD = syscall.Errno(0x5e)
EPERM = syscall.Errno(0x1)
EPFNOSUPPORT = syscall.Errno(0x2e)
EPIPE = syscall.Errno(0x20)
@@ -1472,6 +1704,7 @@ const (
EPROCUNAVAIL = syscall.Errno(0x4c)
EPROGMISMATCH = syscall.Errno(0x4b)
EPROGUNAVAIL = syscall.Errno(0x4a)
+ EPROTO = syscall.Errno(0x5f)
EPROTONOSUPPORT = syscall.Errno(0x2b)
EPROTOTYPE = syscall.Errno(0x29)
ERANGE = syscall.Errno(0x22)
@@ -1568,7 +1801,7 @@ var errorList = [...]struct {
{32, "EPIPE", "broken pipe"},
{33, "EDOM", "numerical argument out of domain"},
{34, "ERANGE", "result too large"},
- {35, "EWOULDBLOCK", "resource temporarily unavailable"},
+ {35, "EAGAIN", "resource temporarily unavailable"},
{36, "EINPROGRESS", "operation now in progress"},
{37, "EALREADY", "operation already in progress"},
{38, "ENOTSOCK", "socket operation on non-socket"},
@@ -1624,7 +1857,11 @@ var errorList = [...]struct {
{88, "ECANCELED", "operation canceled"},
{89, "EIDRM", "identifier removed"},
{90, "ENOMSG", "no message of desired type"},
- {91, "ELAST", "not supported"},
+ {91, "ENOTSUP", "not supported"},
+ {92, "EBADMSG", "bad message"},
+ {93, "ENOTRECOVERABLE", "state not recoverable"},
+ {94, "EOWNERDEAD", "previous owner died"},
+ {95, "ELAST", "protocol error"},
}
// Signal table
@@ -1638,7 +1875,7 @@ var signalList = [...]struct {
{3, "SIGQUIT", "quit"},
{4, "SIGILL", "illegal instruction"},
{5, "SIGTRAP", "trace/BPT trap"},
- {6, "SIGABRT", "abort trap"},
+ {6, "SIGIOT", "abort trap"},
{7, "SIGEMT", "EMT trap"},
{8, "SIGFPE", "floating point exception"},
{9, "SIGKILL", "killed"},
@@ -1665,4 +1902,5 @@ var signalList = [...]struct {
{30, "SIGUSR1", "user defined signal 1"},
{31, "SIGUSR2", "user defined signal 2"},
{32, "SIGTHR", "thread AST"},
+ {28672, "SIGSTKSZ", "unknown signal"},
}
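The regenerated error table above registers the new OpenBSD errnos (EBADMSG, ENOTRECOVERABLE, EOWNERDEAD, EPROTO) so that lookups by number resolve. For example, on an openbsd/386 build (sketch):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// ErrnoName searches the generated errorList by errno number.
	fmt.Println(unix.ErrnoName(unix.EBADMSG)) // EBADMSG
}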
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go
index 25cb6094813c..6015fcb2bf69 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go
@@ -109,6 +109,15 @@ const (
BPF_DIRECTION_IN = 0x1
BPF_DIRECTION_OUT = 0x2
BPF_DIV = 0x30
+ BPF_FILDROP_CAPTURE = 0x1
+ BPF_FILDROP_DROP = 0x2
+ BPF_FILDROP_PASS = 0x0
+ BPF_F_DIR_IN = 0x10
+ BPF_F_DIR_MASK = 0x30
+ BPF_F_DIR_OUT = 0x20
+ BPF_F_DIR_SHIFT = 0x4
+ BPF_F_FLOWID = 0x8
+ BPF_F_PRI_MASK = 0x7
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
@@ -137,6 +146,7 @@ const (
BPF_OR = 0x40
BPF_RELEASE = 0x30bb6
BPF_RET = 0x6
+ BPF_RND = 0xc0
BPF_RSH = 0x70
BPF_ST = 0x2
BPF_STX = 0x3
@@ -177,7 +187,65 @@ const (
CTL_KERN = 0x1
CTL_MAXNAME = 0xc
CTL_NET = 0x4
+ DIOCADDQUEUE = 0xc110445d
+ DIOCADDRULE = 0xcd604404
+ DIOCADDSTATE = 0xc1084425
+ DIOCCHANGERULE = 0xcd60441a
+ DIOCCLRIFFLAG = 0xc028445a
+ DIOCCLRSRCNODES = 0x20004455
+ DIOCCLRSTATES = 0xc0e04412
+ DIOCCLRSTATUS = 0xc0284416
+ DIOCGETLIMIT = 0xc0084427
+ DIOCGETQSTATS = 0xc1204460
+ DIOCGETQUEUE = 0xc110445f
+ DIOCGETQUEUES = 0xc110445e
+ DIOCGETRULE = 0xcd604407
+ DIOCGETRULES = 0xcd604406
+ DIOCGETRULESET = 0xc444443b
+ DIOCGETRULESETS = 0xc444443a
+ DIOCGETSRCNODES = 0xc0104454
+ DIOCGETSTATE = 0xc1084413
+ DIOCGETSTATES = 0xc0104419
+ DIOCGETSTATUS = 0xc1e84415
+ DIOCGETSYNFLWATS = 0xc0084463
+ DIOCGETTIMEOUT = 0xc008441e
+ DIOCIGETIFACES = 0xc0284457
+ DIOCKILLSRCNODES = 0xc080445b
+ DIOCKILLSTATES = 0xc0e04429
+ DIOCNATLOOK = 0xc0504417
+ DIOCOSFPADD = 0xc088444f
DIOCOSFPFLUSH = 0x2000444e
+ DIOCOSFPGET = 0xc0884450
+ DIOCRADDADDRS = 0xc4504443
+ DIOCRADDTABLES = 0xc450443d
+ DIOCRCLRADDRS = 0xc4504442
+ DIOCRCLRASTATS = 0xc4504448
+ DIOCRCLRTABLES = 0xc450443c
+ DIOCRCLRTSTATS = 0xc4504441
+ DIOCRDELADDRS = 0xc4504444
+ DIOCRDELTABLES = 0xc450443e
+ DIOCRGETADDRS = 0xc4504446
+ DIOCRGETASTATS = 0xc4504447
+ DIOCRGETTABLES = 0xc450443f
+ DIOCRGETTSTATS = 0xc4504440
+ DIOCRINADEFINE = 0xc450444d
+ DIOCRSETADDRS = 0xc4504445
+ DIOCRSETTFLAGS = 0xc450444a
+ DIOCRTSTADDRS = 0xc4504449
+ DIOCSETDEBUG = 0xc0044418
+ DIOCSETHOSTID = 0xc0044456
+ DIOCSETIFFLAG = 0xc0284459
+ DIOCSETLIMIT = 0xc0084428
+ DIOCSETREASS = 0xc004445c
+ DIOCSETSTATUSIF = 0xc0284414
+ DIOCSETSYNCOOKIES = 0xc0014462
+ DIOCSETSYNFLWATS = 0xc0084461
+ DIOCSETTIMEOUT = 0xc008441d
+ DIOCSTART = 0x20004401
+ DIOCSTOP = 0x20004402
+ DIOCXBEGIN = 0xc0104451
+ DIOCXCOMMIT = 0xc0104452
+ DIOCXROLLBACK = 0xc0104453
DLT_ARCNET = 0x7
DLT_ATM_RFC1483 = 0xb
DLT_AX25 = 0x3
@@ -240,6 +308,8 @@ const (
EMUL_ENABLED = 0x1
EMUL_NATIVE = 0x2
ENDRUNDISC = 0x9
+ ETH64_8021_RSVD_MASK = 0xfffffffffff0
+ ETH64_8021_RSVD_PREFIX = 0x180c2000000
ETHERMIN = 0x2e
ETHERMTU = 0x5dc
ETHERTYPE_8023 = 0x4
@@ -292,6 +362,7 @@ const (
ETHERTYPE_DN = 0x6003
ETHERTYPE_DOGFIGHT = 0x1989
ETHERTYPE_DSMD = 0x8039
+ ETHERTYPE_EAPOL = 0x888e
ETHERTYPE_ECMA = 0x803
ETHERTYPE_ENCRYPT = 0x803d
ETHERTYPE_ES = 0x805d
@@ -323,6 +394,7 @@ const (
ETHERTYPE_LLDP = 0x88cc
ETHERTYPE_LOGICRAFT = 0x8148
ETHERTYPE_LOOPBACK = 0x9000
+ ETHERTYPE_MACSEC = 0x88e5
ETHERTYPE_MATRA = 0x807a
ETHERTYPE_MAX = 0xffff
ETHERTYPE_MERIT = 0x807c
@@ -351,15 +423,17 @@ const (
ETHERTYPE_NCD = 0x8149
ETHERTYPE_NESTAR = 0x8006
ETHERTYPE_NETBEUI = 0x8191
+ ETHERTYPE_NHRP = 0x2001
ETHERTYPE_NOVELL = 0x8138
ETHERTYPE_NS = 0x600
ETHERTYPE_NSAT = 0x601
ETHERTYPE_NSCOMPAT = 0x807
+ ETHERTYPE_NSH = 0x984f
ETHERTYPE_NTRAILER = 0x10
ETHERTYPE_OS9 = 0x7007
ETHERTYPE_OS9NET = 0x7009
ETHERTYPE_PACER = 0x80c6
- ETHERTYPE_PAE = 0x888e
+ ETHERTYPE_PBB = 0x88e7
ETHERTYPE_PCS = 0x4242
ETHERTYPE_PLANNING = 0x8044
ETHERTYPE_PPP = 0x880b
@@ -441,10 +515,11 @@ const (
ETHER_VLAN_ENCAP_LEN = 0x4
EVFILT_AIO = -0x3
EVFILT_DEVICE = -0x8
+ EVFILT_EXCEPT = -0x9
EVFILT_PROC = -0x5
EVFILT_READ = -0x1
EVFILT_SIGNAL = -0x6
- EVFILT_SYSCOUNT = 0x8
+ EVFILT_SYSCOUNT = 0x9
EVFILT_TIMER = -0x7
EVFILT_VNODE = -0x4
EVFILT_WRITE = -0x2
@@ -466,7 +541,7 @@ const (
EV_FLAG1 = 0x2000
EV_ONESHOT = 0x10
EV_RECEIPT = 0x40
- EV_SYSFLAGS = 0xf000
+ EV_SYSFLAGS = 0xf800
EXTA = 0x4b00
EXTB = 0x9600
EXTPROC = 0x800
@@ -732,6 +807,7 @@ const (
IFT_VOICEOVERCABLE = 0xc6
IFT_VOICEOVERFRAMERELAY = 0x99
IFT_VOICEOVERIP = 0x68
+ IFT_WIREGUARD = 0xfb
IFT_X213 = 0x5d
IFT_X25 = 0x5
IFT_X25DDN = 0x4
@@ -797,9 +873,11 @@ const (
IPPROTO_RAW = 0xff
IPPROTO_ROUTING = 0x2b
IPPROTO_RSVP = 0x2e
+ IPPROTO_SCTP = 0x84
IPPROTO_TCP = 0x6
IPPROTO_TP = 0x1d
IPPROTO_UDP = 0x11
+ IPPROTO_UDPLITE = 0x88
IPV6_AUTH_LEVEL = 0x35
IPV6_AUTOFLOWLABEL = 0x3b
IPV6_CHECKSUM = 0x1a
@@ -906,6 +984,9 @@ const (
IP_TTL = 0x4
ISIG = 0x80
ISTRIP = 0x20
+ ITIMER_PROF = 0x2
+ ITIMER_REAL = 0x0
+ ITIMER_VIRTUAL = 0x1
IUCLC = 0x1000
IXANY = 0x800
IXOFF = 0x400
@@ -970,12 +1051,26 @@ const (
MNT_ROOTFS = 0x4000
MNT_SOFTDEP = 0x4000000
MNT_STALLED = 0x100000
+ MNT_SWAPPABLE = 0x200000
MNT_SYNCHRONOUS = 0x2
MNT_UPDATE = 0x10000
MNT_VISFLAGMASK = 0x400ffff
MNT_WAIT = 0x1
MNT_WANTRDWR = 0x2000000
MNT_WXALLOWED = 0x800
+ MOUNT_AFS = "afs"
+ MOUNT_CD9660 = "cd9660"
+ MOUNT_EXT2FS = "ext2fs"
+ MOUNT_FFS = "ffs"
+ MOUNT_FUSEFS = "fuse"
+ MOUNT_MFS = "mfs"
+ MOUNT_MSDOS = "msdos"
+ MOUNT_NCPFS = "ncpfs"
+ MOUNT_NFS = "nfs"
+ MOUNT_NTFS = "ntfs"
+ MOUNT_TMPFS = "tmpfs"
+ MOUNT_UDF = "udf"
+ MOUNT_UFS = "ffs"
MSG_BCAST = 0x100
MSG_CMSG_CLOEXEC = 0x800
MSG_CTRUNC = 0x20
@@ -988,6 +1083,7 @@ const (
MSG_PEEK = 0x2
MSG_TRUNC = 0x10
MSG_WAITALL = 0x40
+ MSG_WAITFORONE = 0x1000
MS_ASYNC = 0x1
MS_INVALIDATE = 0x4
MS_SYNC = 0x2
@@ -996,7 +1092,8 @@ const (
NET_RT_FLAGS = 0x2
NET_RT_IFLIST = 0x3
NET_RT_IFNAMES = 0x6
- NET_RT_MAXID = 0x7
+ NET_RT_MAXID = 0x8
+ NET_RT_SOURCE = 0x7
NET_RT_STATS = 0x4
NET_RT_TABLE = 0x5
NFDBITS = 0x20
@@ -1013,6 +1110,7 @@ const (
NOTE_FORK = 0x40000000
NOTE_LINK = 0x10
NOTE_LOWAT = 0x1
+ NOTE_OOB = 0x4
NOTE_PCTRLMASK = 0xf0000000
NOTE_PDATAMASK = 0xfffff
NOTE_RENAME = 0x20
@@ -1130,9 +1228,11 @@ const (
RTF_STATIC = 0x800
RTF_UP = 0x1
RTF_USETRAILERS = 0x8000
+ RTM_80211INFO = 0x15
RTM_ADD = 0x1
RTM_BFD = 0x12
RTM_CHANGE = 0x3
+ RTM_CHGADDRATTR = 0x14
RTM_DELADDR = 0xd
RTM_DELETE = 0x2
RTM_DESYNC = 0x10
@@ -1140,7 +1240,6 @@ const (
RTM_IFANNOUNCE = 0xf
RTM_IFINFO = 0xe
RTM_INVALIDATE = 0x11
- RTM_LOCK = 0x8
RTM_LOSING = 0x5
RTM_MAXSIZE = 0x800
RTM_MISS = 0x7
@@ -1148,7 +1247,7 @@ const (
RTM_PROPOSAL = 0x13
RTM_REDIRECT = 0x6
RTM_RESOLVE = 0xb
- RTM_RTTUNIT = 0xf4240
+ RTM_SOURCE = 0x16
RTM_VERSION = 0x5
RTV_EXPIRE = 0x4
RTV_HOPCOUNT = 0x2
@@ -1166,6 +1265,9 @@ const (
RUSAGE_THREAD = 0x1
SCM_RIGHTS = 0x1
SCM_TIMESTAMP = 0x4
+ SEEK_CUR = 0x1
+ SEEK_END = 0x2
+ SEEK_SET = 0x0
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
@@ -1182,35 +1284,37 @@ const (
SIOCBRDGDELS = 0x80606942
SIOCBRDGFLUSH = 0x80606948
SIOCBRDGFRL = 0x808c694e
- SIOCBRDGGCACHE = 0xc0186941
- SIOCBRDGGFD = 0xc0186952
- SIOCBRDGGHT = 0xc0186951
+ SIOCBRDGGCACHE = 0xc0146941
+ SIOCBRDGGFD = 0xc0146952
+ SIOCBRDGGHT = 0xc0146951
SIOCBRDGGIFFLGS = 0xc060693e
- SIOCBRDGGMA = 0xc0186953
+ SIOCBRDGGMA = 0xc0146953
SIOCBRDGGPARAM = 0xc0406958
- SIOCBRDGGPRI = 0xc0186950
+ SIOCBRDGGPRI = 0xc0146950
SIOCBRDGGRL = 0xc030694f
- SIOCBRDGGTO = 0xc0186946
+ SIOCBRDGGTO = 0xc0146946
SIOCBRDGIFS = 0xc0606942
SIOCBRDGRTS = 0xc0206943
SIOCBRDGSADDR = 0xc1286944
- SIOCBRDGSCACHE = 0x80186940
- SIOCBRDGSFD = 0x80186952
- SIOCBRDGSHT = 0x80186951
+ SIOCBRDGSCACHE = 0x80146940
+ SIOCBRDGSFD = 0x80146952
+ SIOCBRDGSHT = 0x80146951
SIOCBRDGSIFCOST = 0x80606955
SIOCBRDGSIFFLGS = 0x8060693f
SIOCBRDGSIFPRIO = 0x80606954
SIOCBRDGSIFPROT = 0x8060694a
- SIOCBRDGSMA = 0x80186953
- SIOCBRDGSPRI = 0x80186950
- SIOCBRDGSPROTO = 0x8018695a
- SIOCBRDGSTO = 0x80186945
- SIOCBRDGSTXHC = 0x80186959
+ SIOCBRDGSMA = 0x80146953
+ SIOCBRDGSPRI = 0x80146950
+ SIOCBRDGSPROTO = 0x8014695a
+ SIOCBRDGSTO = 0x80146945
+ SIOCBRDGSTXHC = 0x80146959
+ SIOCDELLABEL = 0x80206997
SIOCDELMULTI = 0x80206932
SIOCDIFADDR = 0x80206919
SIOCDIFGROUP = 0x80286989
SIOCDIFPARENT = 0x802069b4
SIOCDIFPHYADDR = 0x80206949
+ SIOCDPWE3NEIGHBOR = 0x802069de
SIOCDVNETID = 0x802069af
SIOCGETKALIVE = 0xc01869a4
SIOCGETLABEL = 0x8020699a
@@ -1229,6 +1333,7 @@ const (
SIOCGIFFLAGS = 0xc0206911
SIOCGIFGATTR = 0xc028698b
SIOCGIFGENERIC = 0xc020693a
+ SIOCGIFGLIST = 0xc028698d
SIOCGIFGMEMB = 0xc028698a
SIOCGIFGROUP = 0xc0286988
SIOCGIFHARDMTU = 0xc02069a5
@@ -1243,13 +1348,21 @@ const (
SIOCGIFRDOMAIN = 0xc02069a0
SIOCGIFRTLABEL = 0xc0206983
SIOCGIFRXR = 0x802069aa
+ SIOCGIFSFFPAGE = 0xc1126939
SIOCGIFXFLAGS = 0xc020699e
SIOCGLIFPHYADDR = 0xc218694b
SIOCGLIFPHYDF = 0xc02069c2
+ SIOCGLIFPHYECN = 0xc02069c8
SIOCGLIFPHYRTABLE = 0xc02069a2
SIOCGLIFPHYTTL = 0xc02069a9
SIOCGPGRP = 0x40047309
+ SIOCGPWE3 = 0xc0206998
+ SIOCGPWE3CTRLWORD = 0xc02069dc
+ SIOCGPWE3FAT = 0xc02069dd
+ SIOCGPWE3NEIGHBOR = 0xc21869de
+ SIOCGRXHPRIO = 0xc02069db
SIOCGSPPPPARAMS = 0xc0206994
+ SIOCGTXHPRIO = 0xc02069c6
SIOCGUMBINFO = 0xc02069be
SIOCGUMBPARAM = 0xc02069c0
SIOCGVH = 0xc02069f6
@@ -1287,19 +1400,20 @@ const (
SIOCSIFXFLAGS = 0x8020699d
SIOCSLIFPHYADDR = 0x8218694a
SIOCSLIFPHYDF = 0x802069c1
+ SIOCSLIFPHYECN = 0x802069c7
SIOCSLIFPHYRTABLE = 0x802069a1
SIOCSLIFPHYTTL = 0x802069a8
SIOCSPGRP = 0x80047308
+ SIOCSPWE3CTRLWORD = 0x802069dc
+ SIOCSPWE3FAT = 0x802069dd
+ SIOCSPWE3NEIGHBOR = 0x821869de
+ SIOCSRXHPRIO = 0x802069db
SIOCSSPPPPARAMS = 0x80206993
+ SIOCSTXHPRIO = 0x802069c5
SIOCSUMBPARAM = 0x802069bf
SIOCSVH = 0xc02069f5
SIOCSVNETFLOWID = 0x802069c3
SIOCSVNETID = 0x802069a6
- SIOCSWGDPID = 0xc018695b
- SIOCSWGMAXFLOW = 0xc0186960
- SIOCSWGMAXGROUP = 0xc018695d
- SIOCSWSDPID = 0x8018695c
- SIOCSWSPORTNO = 0xc060695f
SOCK_CLOEXEC = 0x8000
SOCK_DGRAM = 0x2
SOCK_DNS = 0x1000
@@ -1314,6 +1428,7 @@ const (
SO_BINDANY = 0x1000
SO_BROADCAST = 0x20
SO_DEBUG = 0x1
+ SO_DOMAIN = 0x1024
SO_DONTROUTE = 0x10
SO_ERROR = 0x1007
SO_KEEPALIVE = 0x8
@@ -1321,6 +1436,7 @@ const (
SO_NETPROC = 0x1020
SO_OOBINLINE = 0x100
SO_PEERCRED = 0x1022
+ SO_PROTOCOL = 0x1025
SO_RCVBUF = 0x1002
SO_RCVLOWAT = 0x1004
SO_RCVTIMEO = 0x1006
@@ -1370,7 +1486,18 @@ const (
TCOFLUSH = 0x2
TCOOFF = 0x1
TCOON = 0x2
- TCP_MAXBURST = 0x4
+ TCPOPT_EOL = 0x0
+ TCPOPT_MAXSEG = 0x2
+ TCPOPT_NOP = 0x1
+ TCPOPT_SACK = 0x5
+ TCPOPT_SACK_HDR = 0x1010500
+ TCPOPT_SACK_PERMITTED = 0x4
+ TCPOPT_SACK_PERMIT_HDR = 0x1010402
+ TCPOPT_SIGNATURE = 0x13
+ TCPOPT_TIMESTAMP = 0x8
+ TCPOPT_TSTAMP_HDR = 0x101080a
+ TCPOPT_WINDOW = 0x3
+ TCP_INFO = 0x9
TCP_MAXSEG = 0x2
TCP_MAXWIN = 0xffff
TCP_MAX_SACK = 0x3
@@ -1379,8 +1506,11 @@ const (
TCP_MSS = 0x200
TCP_NODELAY = 0x1
TCP_NOPUSH = 0x10
+ TCP_SACKHOLE_LIMIT = 0x80
TCP_SACK_ENABLE = 0x8
TCSAFLUSH = 0x2
+ TIMER_ABSTIME = 0x1
+ TIMER_RELTIME = 0x0
TIOCCBRK = 0x2000747a
TIOCCDTR = 0x20007478
TIOCCHKVERAUTH = 0x2000741e
@@ -1445,7 +1575,6 @@ const (
TIOCSPGRP = 0x80047476
TIOCSTART = 0x2000746e
TIOCSTAT = 0x20007465
- TIOCSTI = 0x80017472
TIOCSTOP = 0x2000746f
TIOCSTSTAMP = 0x8008745a
TIOCSWINSZ = 0x80087467
@@ -1467,7 +1596,8 @@ const (
VMIN = 0x10
VM_ANONMIN = 0x7
VM_LOADAVG = 0x2
- VM_MAXID = 0xc
+ VM_MALLOC_CONF = 0xc
+ VM_MAXID = 0xd
VM_MAXSLP = 0xa
VM_METER = 0x1
VM_NKMEMPAGES = 0x6
@@ -1745,7 +1875,7 @@ var signalList = [...]struct {
{3, "SIGQUIT", "quit"},
{4, "SIGILL", "illegal instruction"},
{5, "SIGTRAP", "trace/BPT trap"},
- {6, "SIGABRT", "abort trap"},
+ {6, "SIGIOT", "abort trap"},
{7, "SIGEMT", "EMT trap"},
{8, "SIGFPE", "floating point exception"},
{9, "SIGKILL", "killed"},
@@ -1772,4 +1902,5 @@ var signalList = [...]struct {
{30, "SIGUSR1", "user defined signal 1"},
{31, "SIGUSR2", "user defined signal 2"},
{32, "SIGTHR", "thread AST"},
+ {28672, "SIGSTKSZ", "unknown signal"},
}
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go
index aef6c085609a..8d44955e44d8 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go
@@ -46,6 +46,7 @@ const (
AF_SNA = 0xb
AF_UNIX = 0x1
AF_UNSPEC = 0x0
+ ALTWERASE = 0x200
ARPHRD_ETHER = 0x1
ARPHRD_FRELAY = 0xf
ARPHRD_IEEE1394 = 0x18
@@ -82,7 +83,7 @@ const (
BIOCGFILDROP = 0x40044278
BIOCGHDRCMPLT = 0x40044274
BIOCGRSIG = 0x40044273
- BIOCGRTIMEOUT = 0x400c426e
+ BIOCGRTIMEOUT = 0x4010426e
BIOCGSTATS = 0x4008426f
BIOCIMMEDIATE = 0x80044270
BIOCLOCK = 0x20004276
@@ -96,7 +97,7 @@ const (
BIOCSFILDROP = 0x80044279
BIOCSHDRCMPLT = 0x80044275
BIOCSRSIG = 0x80044272
- BIOCSRTIMEOUT = 0x800c426d
+ BIOCSRTIMEOUT = 0x8010426d
BIOCVERSION = 0x40044271
BPF_A = 0x10
BPF_ABS = 0x20
@@ -108,6 +109,15 @@ const (
BPF_DIRECTION_IN = 0x1
BPF_DIRECTION_OUT = 0x2
BPF_DIV = 0x30
+ BPF_FILDROP_CAPTURE = 0x1
+ BPF_FILDROP_DROP = 0x2
+ BPF_FILDROP_PASS = 0x0
+ BPF_F_DIR_IN = 0x10
+ BPF_F_DIR_MASK = 0x30
+ BPF_F_DIR_OUT = 0x20
+ BPF_F_DIR_SHIFT = 0x4
+ BPF_F_FLOWID = 0x8
+ BPF_F_PRI_MASK = 0x7
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
@@ -136,6 +146,7 @@ const (
BPF_OR = 0x40
BPF_RELEASE = 0x30bb6
BPF_RET = 0x6
+ BPF_RND = 0xc0
BPF_RSH = 0x70
BPF_ST = 0x2
BPF_STX = 0x3
@@ -147,6 +158,12 @@ const (
BRKINT = 0x2
CFLUSH = 0xf
CLOCAL = 0x8000
+ CLOCK_BOOTTIME = 0x6
+ CLOCK_MONOTONIC = 0x3
+ CLOCK_PROCESS_CPUTIME_ID = 0x2
+ CLOCK_REALTIME = 0x0
+ CLOCK_THREAD_CPUTIME_ID = 0x4
+ CLOCK_UPTIME = 0x5
CPUSTATES = 0x6
CP_IDLE = 0x5
CP_INTR = 0x4
@@ -170,7 +187,65 @@ const (
CTL_KERN = 0x1
CTL_MAXNAME = 0xc
CTL_NET = 0x4
+ DIOCADDQUEUE = 0xc100445d
+ DIOCADDRULE = 0xcce04404
+ DIOCADDSTATE = 0xc1084425
+ DIOCCHANGERULE = 0xcce0441a
+ DIOCCLRIFFLAG = 0xc024445a
+ DIOCCLRSRCNODES = 0x20004455
+ DIOCCLRSTATES = 0xc0d04412
+ DIOCCLRSTATUS = 0xc0244416
+ DIOCGETLIMIT = 0xc0084427
+ DIOCGETQSTATS = 0xc1084460
+ DIOCGETQUEUE = 0xc100445f
+ DIOCGETQUEUES = 0xc100445e
+ DIOCGETRULE = 0xcce04407
+ DIOCGETRULES = 0xcce04406
+ DIOCGETRULESET = 0xc444443b
+ DIOCGETRULESETS = 0xc444443a
+ DIOCGETSRCNODES = 0xc0084454
+ DIOCGETSTATE = 0xc1084413
+ DIOCGETSTATES = 0xc0084419
+ DIOCGETSTATUS = 0xc1e84415
+ DIOCGETSYNFLWATS = 0xc0084463
+ DIOCGETTIMEOUT = 0xc008441e
+ DIOCIGETIFACES = 0xc0244457
+ DIOCKILLSRCNODES = 0xc068445b
+ DIOCKILLSTATES = 0xc0d04429
+ DIOCNATLOOK = 0xc0504417
+ DIOCOSFPADD = 0xc088444f
DIOCOSFPFLUSH = 0x2000444e
+ DIOCOSFPGET = 0xc0884450
+ DIOCRADDADDRS = 0xc44c4443
+ DIOCRADDTABLES = 0xc44c443d
+ DIOCRCLRADDRS = 0xc44c4442
+ DIOCRCLRASTATS = 0xc44c4448
+ DIOCRCLRTABLES = 0xc44c443c
+ DIOCRCLRTSTATS = 0xc44c4441
+ DIOCRDELADDRS = 0xc44c4444
+ DIOCRDELTABLES = 0xc44c443e
+ DIOCRGETADDRS = 0xc44c4446
+ DIOCRGETASTATS = 0xc44c4447
+ DIOCRGETTABLES = 0xc44c443f
+ DIOCRGETTSTATS = 0xc44c4440
+ DIOCRINADEFINE = 0xc44c444d
+ DIOCRSETADDRS = 0xc44c4445
+ DIOCRSETTFLAGS = 0xc44c444a
+ DIOCRTSTADDRS = 0xc44c4449
+ DIOCSETDEBUG = 0xc0044418
+ DIOCSETHOSTID = 0xc0044456
+ DIOCSETIFFLAG = 0xc0244459
+ DIOCSETLIMIT = 0xc0084428
+ DIOCSETREASS = 0xc004445c
+ DIOCSETSTATUSIF = 0xc0244414
+ DIOCSETSYNCOOKIES = 0xc0014462
+ DIOCSETSYNFLWATS = 0xc0084461
+ DIOCSETTIMEOUT = 0xc008441d
+ DIOCSTART = 0x20004401
+ DIOCSTOP = 0x20004402
+ DIOCXBEGIN = 0xc00c4451
+ DIOCXCOMMIT = 0xc00c4452
+ DIOCXROLLBACK = 0xc00c4453
DLT_ARCNET = 0x7
DLT_ATM_RFC1483 = 0xb
DLT_AX25 = 0x3
@@ -186,6 +261,7 @@ const (
DLT_LOOP = 0xc
DLT_MPLS = 0xdb
DLT_NULL = 0x0
+ DLT_OPENFLOW = 0x10b
DLT_PFLOG = 0x75
DLT_PFSYNC = 0x12
DLT_PPP = 0x9
@@ -196,6 +272,23 @@ const (
DLT_RAW = 0xe
DLT_SLIP = 0x8
DLT_SLIP_BSDOS = 0xf
+ DLT_USBPCAP = 0xf9
+ DLT_USER0 = 0x93
+ DLT_USER1 = 0x94
+ DLT_USER10 = 0x9d
+ DLT_USER11 = 0x9e
+ DLT_USER12 = 0x9f
+ DLT_USER13 = 0xa0
+ DLT_USER14 = 0xa1
+ DLT_USER15 = 0xa2
+ DLT_USER2 = 0x95
+ DLT_USER3 = 0x96
+ DLT_USER4 = 0x97
+ DLT_USER5 = 0x98
+ DLT_USER6 = 0x99
+ DLT_USER7 = 0x9a
+ DLT_USER8 = 0x9b
+ DLT_USER9 = 0x9c
DT_BLK = 0x6
DT_CHR = 0x2
DT_DIR = 0x4
@@ -215,6 +308,8 @@ const (
EMUL_ENABLED = 0x1
EMUL_NATIVE = 0x2
ENDRUNDISC = 0x9
+ ETH64_8021_RSVD_MASK = 0xfffffffffff0
+ ETH64_8021_RSVD_PREFIX = 0x180c2000000
ETHERMIN = 0x2e
ETHERMTU = 0x5dc
ETHERTYPE_8023 = 0x4
@@ -267,6 +362,7 @@ const (
ETHERTYPE_DN = 0x6003
ETHERTYPE_DOGFIGHT = 0x1989
ETHERTYPE_DSMD = 0x8039
+ ETHERTYPE_EAPOL = 0x888e
ETHERTYPE_ECMA = 0x803
ETHERTYPE_ENCRYPT = 0x803d
ETHERTYPE_ES = 0x805d
@@ -298,6 +394,7 @@ const (
ETHERTYPE_LLDP = 0x88cc
ETHERTYPE_LOGICRAFT = 0x8148
ETHERTYPE_LOOPBACK = 0x9000
+ ETHERTYPE_MACSEC = 0x88e5
ETHERTYPE_MATRA = 0x807a
ETHERTYPE_MAX = 0xffff
ETHERTYPE_MERIT = 0x807c
@@ -326,15 +423,17 @@ const (
ETHERTYPE_NCD = 0x8149
ETHERTYPE_NESTAR = 0x8006
ETHERTYPE_NETBEUI = 0x8191
+ ETHERTYPE_NHRP = 0x2001
ETHERTYPE_NOVELL = 0x8138
ETHERTYPE_NS = 0x600
ETHERTYPE_NSAT = 0x601
ETHERTYPE_NSCOMPAT = 0x807
+ ETHERTYPE_NSH = 0x984f
ETHERTYPE_NTRAILER = 0x10
ETHERTYPE_OS9 = 0x7007
ETHERTYPE_OS9NET = 0x7009
ETHERTYPE_PACER = 0x80c6
- ETHERTYPE_PAE = 0x888e
+ ETHERTYPE_PBB = 0x88e7
ETHERTYPE_PCS = 0x4242
ETHERTYPE_PLANNING = 0x8044
ETHERTYPE_PPP = 0x880b
@@ -409,28 +508,40 @@ const (
ETHER_CRC_POLY_LE = 0xedb88320
ETHER_HDR_LEN = 0xe
ETHER_MAX_DIX_LEN = 0x600
+ ETHER_MAX_HARDMTU_LEN = 0xff9b
ETHER_MAX_LEN = 0x5ee
ETHER_MIN_LEN = 0x40
ETHER_TYPE_LEN = 0x2
ETHER_VLAN_ENCAP_LEN = 0x4
EVFILT_AIO = -0x3
+ EVFILT_DEVICE = -0x8
+ EVFILT_EXCEPT = -0x9
EVFILT_PROC = -0x5
EVFILT_READ = -0x1
EVFILT_SIGNAL = -0x6
- EVFILT_SYSCOUNT = 0x7
+ EVFILT_SYSCOUNT = 0x9
EVFILT_TIMER = -0x7
EVFILT_VNODE = -0x4
EVFILT_WRITE = -0x2
+ EVL_ENCAPLEN = 0x4
+ EVL_PRIO_BITS = 0xd
+ EVL_PRIO_MAX = 0x7
+ EVL_VLID_MASK = 0xfff
+ EVL_VLID_MAX = 0xffe
+ EVL_VLID_MIN = 0x1
+ EVL_VLID_NULL = 0x0
EV_ADD = 0x1
EV_CLEAR = 0x20
EV_DELETE = 0x2
EV_DISABLE = 0x8
+ EV_DISPATCH = 0x80
EV_ENABLE = 0x4
EV_EOF = 0x8000
EV_ERROR = 0x4000
EV_FLAG1 = 0x2000
EV_ONESHOT = 0x10
- EV_SYSFLAGS = 0xf000
+ EV_RECEIPT = 0x40
+ EV_SYSFLAGS = 0xf800
EXTA = 0x4b00
EXTB = 0x9600
EXTPROC = 0x800
@@ -443,6 +554,8 @@ const (
F_GETFL = 0x3
F_GETLK = 0x7
F_GETOWN = 0x5
+ F_ISATTY = 0xb
+ F_OK = 0x0
F_RDLCK = 0x1
F_SETFD = 0x2
F_SETFL = 0x4
@@ -459,7 +572,6 @@ const (
IEXTEN = 0x400
IFAN_ARRIVAL = 0x0
IFAN_DEPARTURE = 0x1
- IFA_ROUTE = 0x1
IFF_ALLMULTI = 0x200
IFF_BROADCAST = 0x2
IFF_CANTCHANGE = 0x8e52
@@ -470,12 +582,12 @@ const (
IFF_LOOPBACK = 0x8
IFF_MULTICAST = 0x8000
IFF_NOARP = 0x80
- IFF_NOTRAILERS = 0x20
IFF_OACTIVE = 0x400
IFF_POINTOPOINT = 0x10
IFF_PROMISC = 0x100
IFF_RUNNING = 0x40
IFF_SIMPLEX = 0x800
+ IFF_STATICARP = 0x20
IFF_UP = 0x1
IFNAMSIZ = 0x10
IFT_1822 = 0x2
@@ -604,6 +716,7 @@ const (
IFT_LINEGROUP = 0xd2
IFT_LOCALTALK = 0x2a
IFT_LOOP = 0x18
+ IFT_MBIM = 0xfa
IFT_MEDIAMAILOVERIP = 0x8b
IFT_MFSIGLINK = 0xa7
IFT_MIOX25 = 0x26
@@ -694,6 +807,7 @@ const (
IFT_VOICEOVERCABLE = 0xc6
IFT_VOICEOVERFRAMERELAY = 0x99
IFT_VOICEOVERIP = 0x68
+ IFT_WIREGUARD = 0xfb
IFT_X213 = 0x5d
IFT_X25 = 0x5
IFT_X25DDN = 0x4
@@ -728,8 +842,6 @@ const (
IPPROTO_AH = 0x33
IPPROTO_CARP = 0x70
IPPROTO_DIVERT = 0x102
- IPPROTO_DIVERT_INIT = 0x2
- IPPROTO_DIVERT_RESP = 0x1
IPPROTO_DONE = 0x101
IPPROTO_DSTOPTS = 0x3c
IPPROTO_EGP = 0x8
@@ -761,9 +873,11 @@ const (
IPPROTO_RAW = 0xff
IPPROTO_ROUTING = 0x2b
IPPROTO_RSVP = 0x2e
+ IPPROTO_SCTP = 0x84
IPPROTO_TCP = 0x6
IPPROTO_TP = 0x1d
IPPROTO_UDP = 0x11
+ IPPROTO_UDPLITE = 0x88
IPV6_AUTH_LEVEL = 0x35
IPV6_AUTOFLOWLABEL = 0x3b
IPV6_CHECKSUM = 0x1a
@@ -786,6 +900,7 @@ const (
IPV6_LEAVE_GROUP = 0xd
IPV6_MAXHLIM = 0xff
IPV6_MAXPACKET = 0xffff
+ IPV6_MINHOPCOUNT = 0x41
IPV6_MMTU = 0x500
IPV6_MULTICAST_HOPS = 0xa
IPV6_MULTICAST_IF = 0x9
@@ -825,12 +940,12 @@ const (
IP_DEFAULT_MULTICAST_LOOP = 0x1
IP_DEFAULT_MULTICAST_TTL = 0x1
IP_DF = 0x4000
- IP_DIVERTFL = 0x1022
IP_DROP_MEMBERSHIP = 0xd
IP_ESP_NETWORK_LEVEL = 0x16
IP_ESP_TRANS_LEVEL = 0x15
IP_HDRINCL = 0x2
IP_IPCOMP_LEVEL = 0x1d
+ IP_IPDEFTTL = 0x25
IP_IPSECFLOWINFO = 0x24
IP_IPSEC_LOCAL_AUTH = 0x1b
IP_IPSEC_LOCAL_CRED = 0x19
@@ -864,10 +979,15 @@ const (
IP_RETOPTS = 0x8
IP_RF = 0x8000
IP_RTABLE = 0x1021
+ IP_SENDSRCADDR = 0x7
IP_TOS = 0x3
IP_TTL = 0x4
ISIG = 0x80
ISTRIP = 0x20
+ ITIMER_PROF = 0x2
+ ITIMER_REAL = 0x0
+ ITIMER_VIRTUAL = 0x1
+ IUCLC = 0x1000
IXANY = 0x800
IXOFF = 0x400
IXON = 0x200
@@ -922,6 +1042,7 @@ const (
MNT_NOATIME = 0x8000
MNT_NODEV = 0x10
MNT_NOEXEC = 0x4
+ MNT_NOPERM = 0x20
MNT_NOSUID = 0x8
MNT_NOWAIT = 0x2
MNT_QUOTA = 0x2000
@@ -929,12 +1050,27 @@ const (
MNT_RELOAD = 0x40000
MNT_ROOTFS = 0x4000
MNT_SOFTDEP = 0x4000000
+ MNT_STALLED = 0x100000
+ MNT_SWAPPABLE = 0x200000
MNT_SYNCHRONOUS = 0x2
MNT_UPDATE = 0x10000
MNT_VISFLAGMASK = 0x400ffff
MNT_WAIT = 0x1
MNT_WANTRDWR = 0x2000000
MNT_WXALLOWED = 0x800
+ MOUNT_AFS = "afs"
+ MOUNT_CD9660 = "cd9660"
+ MOUNT_EXT2FS = "ext2fs"
+ MOUNT_FFS = "ffs"
+ MOUNT_FUSEFS = "fuse"
+ MOUNT_MFS = "mfs"
+ MOUNT_MSDOS = "msdos"
+ MOUNT_NCPFS = "ncpfs"
+ MOUNT_NFS = "nfs"
+ MOUNT_NTFS = "ntfs"
+ MOUNT_TMPFS = "tmpfs"
+ MOUNT_UDF = "udf"
+ MOUNT_UFS = "ffs"
MSG_BCAST = 0x100
MSG_CMSG_CLOEXEC = 0x800
MSG_CTRUNC = 0x20
@@ -947,6 +1083,7 @@ const (
MSG_PEEK = 0x2
MSG_TRUNC = 0x10
MSG_WAITALL = 0x40
+ MSG_WAITFORONE = 0x1000
MS_ASYNC = 0x1
MS_INVALIDATE = 0x4
MS_SYNC = 0x2
@@ -954,12 +1091,16 @@ const (
NET_RT_DUMP = 0x1
NET_RT_FLAGS = 0x2
NET_RT_IFLIST = 0x3
- NET_RT_MAXID = 0x6
+ NET_RT_IFNAMES = 0x6
+ NET_RT_MAXID = 0x8
+ NET_RT_SOURCE = 0x7
NET_RT_STATS = 0x4
NET_RT_TABLE = 0x5
NFDBITS = 0x20
NOFLSH = 0x80000000
+ NOKERNINFO = 0x2000000
NOTE_ATTRIB = 0x8
+ NOTE_CHANGE = 0x1
NOTE_CHILD = 0x4
NOTE_DELETE = 0x1
NOTE_EOF = 0x2
@@ -969,6 +1110,7 @@ const (
NOTE_FORK = 0x40000000
NOTE_LINK = 0x10
NOTE_LOWAT = 0x1
+ NOTE_OOB = 0x4
NOTE_PCTRLMASK = 0xf0000000
NOTE_PDATAMASK = 0xfffff
NOTE_RENAME = 0x20
@@ -978,11 +1120,13 @@ const (
NOTE_TRUNCATE = 0x80
NOTE_WRITE = 0x2
OCRNL = 0x10
+ OLCUC = 0x20
ONLCR = 0x2
ONLRET = 0x80
ONOCR = 0x40
ONOEOT = 0x8
OPOST = 0x1
+ OXTABS = 0x4
O_ACCMODE = 0x3
O_APPEND = 0x8
O_ASYNC = 0x40
@@ -1027,19 +1171,25 @@ const (
RLIMIT_STACK = 0x3
RLIM_INFINITY = 0x7fffffffffffffff
RTAX_AUTHOR = 0x6
+ RTAX_BFD = 0xb
RTAX_BRD = 0x7
+ RTAX_DNS = 0xc
RTAX_DST = 0x0
RTAX_GATEWAY = 0x1
RTAX_GENMASK = 0x3
RTAX_IFA = 0x5
RTAX_IFP = 0x4
RTAX_LABEL = 0xa
- RTAX_MAX = 0xb
+ RTAX_MAX = 0xf
RTAX_NETMASK = 0x2
+ RTAX_SEARCH = 0xe
RTAX_SRC = 0x8
RTAX_SRCMASK = 0x9
+ RTAX_STATIC = 0xd
RTA_AUTHOR = 0x40
+ RTA_BFD = 0x800
RTA_BRD = 0x80
+ RTA_DNS = 0x1000
RTA_DST = 0x1
RTA_GATEWAY = 0x2
RTA_GENMASK = 0x8
@@ -1047,24 +1197,29 @@ const (
RTA_IFP = 0x10
RTA_LABEL = 0x400
RTA_NETMASK = 0x4
+ RTA_SEARCH = 0x4000
RTA_SRC = 0x100
RTA_SRCMASK = 0x200
+ RTA_STATIC = 0x2000
RTF_ANNOUNCE = 0x4000
+ RTF_BFD = 0x1000000
RTF_BLACKHOLE = 0x1000
RTF_BROADCAST = 0x400000
+ RTF_CACHED = 0x20000
RTF_CLONED = 0x10000
RTF_CLONING = 0x100
+ RTF_CONNECTED = 0x800000
RTF_DONE = 0x40
RTF_DYNAMIC = 0x10
- RTF_FMASK = 0x70f808
+ RTF_FMASK = 0x110fc08
RTF_GATEWAY = 0x2
RTF_HOST = 0x4
RTF_LLINFO = 0x400
RTF_LOCAL = 0x200000
- RTF_MASK = 0x80
RTF_MODIFIED = 0x20
RTF_MPATH = 0x40000
RTF_MPLS = 0x100000
+ RTF_MULTICAST = 0x200
RTF_PERMANENT_ARP = 0x2000
RTF_PROTO1 = 0x8000
RTF_PROTO2 = 0x4000
@@ -1073,23 +1228,26 @@ const (
RTF_STATIC = 0x800
RTF_UP = 0x1
RTF_USETRAILERS = 0x8000
- RTF_XRESOLVE = 0x200
+ RTM_80211INFO = 0x15
RTM_ADD = 0x1
+ RTM_BFD = 0x12
RTM_CHANGE = 0x3
+ RTM_CHGADDRATTR = 0x14
RTM_DELADDR = 0xd
RTM_DELETE = 0x2
RTM_DESYNC = 0x10
RTM_GET = 0x4
RTM_IFANNOUNCE = 0xf
RTM_IFINFO = 0xe
- RTM_LOCK = 0x8
+ RTM_INVALIDATE = 0x11
RTM_LOSING = 0x5
RTM_MAXSIZE = 0x800
RTM_MISS = 0x7
RTM_NEWADDR = 0xc
+ RTM_PROPOSAL = 0x13
RTM_REDIRECT = 0x6
RTM_RESOLVE = 0xb
- RTM_RTTUNIT = 0xf4240
+ RTM_SOURCE = 0x16
RTM_VERSION = 0x5
RTV_EXPIRE = 0x4
RTV_HOPCOUNT = 0x2
@@ -1099,67 +1257,74 @@ const (
RTV_RTTVAR = 0x80
RTV_SPIPE = 0x10
RTV_SSTHRESH = 0x20
+ RT_TABLEID_BITS = 0x8
+ RT_TABLEID_MASK = 0xff
RT_TABLEID_MAX = 0xff
RUSAGE_CHILDREN = -0x1
RUSAGE_SELF = 0x0
RUSAGE_THREAD = 0x1
SCM_RIGHTS = 0x1
SCM_TIMESTAMP = 0x4
+ SEEK_CUR = 0x1
+ SEEK_END = 0x2
+ SEEK_SET = 0x0
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
SIOCADDMULTI = 0x80206931
SIOCAIFADDR = 0x8040691a
SIOCAIFGROUP = 0x80246987
- SIOCALIFADDR = 0x8218691c
SIOCATMARK = 0x40047307
- SIOCBRDGADD = 0x8054693c
- SIOCBRDGADDS = 0x80546941
- SIOCBRDGARL = 0x806e694d
+ SIOCBRDGADD = 0x8060693c
+ SIOCBRDGADDL = 0x80606949
+ SIOCBRDGADDS = 0x80606941
+ SIOCBRDGARL = 0x808c694d
SIOCBRDGDADDR = 0x81286947
- SIOCBRDGDEL = 0x8054693d
- SIOCBRDGDELS = 0x80546942
- SIOCBRDGFLUSH = 0x80546948
- SIOCBRDGFRL = 0x806e694e
+ SIOCBRDGDEL = 0x8060693d
+ SIOCBRDGDELS = 0x80606942
+ SIOCBRDGFLUSH = 0x80606948
+ SIOCBRDGFRL = 0x808c694e
SIOCBRDGGCACHE = 0xc0146941
SIOCBRDGGFD = 0xc0146952
SIOCBRDGGHT = 0xc0146951
- SIOCBRDGGIFFLGS = 0xc054693e
+ SIOCBRDGGIFFLGS = 0xc060693e
SIOCBRDGGMA = 0xc0146953
- SIOCBRDGGPARAM = 0xc03c6958
+ SIOCBRDGGPARAM = 0xc0406958
SIOCBRDGGPRI = 0xc0146950
SIOCBRDGGRL = 0xc028694f
- SIOCBRDGGSIFS = 0xc054693c
SIOCBRDGGTO = 0xc0146946
- SIOCBRDGIFS = 0xc0546942
+ SIOCBRDGIFS = 0xc0606942
SIOCBRDGRTS = 0xc0186943
SIOCBRDGSADDR = 0xc1286944
SIOCBRDGSCACHE = 0x80146940
SIOCBRDGSFD = 0x80146952
SIOCBRDGSHT = 0x80146951
- SIOCBRDGSIFCOST = 0x80546955
- SIOCBRDGSIFFLGS = 0x8054693f
- SIOCBRDGSIFPRIO = 0x80546954
+ SIOCBRDGSIFCOST = 0x80606955
+ SIOCBRDGSIFFLGS = 0x8060693f
+ SIOCBRDGSIFPRIO = 0x80606954
+ SIOCBRDGSIFPROT = 0x8060694a
SIOCBRDGSMA = 0x80146953
SIOCBRDGSPRI = 0x80146950
SIOCBRDGSPROTO = 0x8014695a
SIOCBRDGSTO = 0x80146945
SIOCBRDGSTXHC = 0x80146959
+ SIOCDELLABEL = 0x80206997
SIOCDELMULTI = 0x80206932
SIOCDIFADDR = 0x80206919
SIOCDIFGROUP = 0x80246989
+ SIOCDIFPARENT = 0x802069b4
SIOCDIFPHYADDR = 0x80206949
- SIOCDLIFADDR = 0x8218691e
+ SIOCDPWE3NEIGHBOR = 0x802069de
+ SIOCDVNETID = 0x802069af
SIOCGETKALIVE = 0xc01869a4
SIOCGETLABEL = 0x8020699a
+ SIOCGETMPWCFG = 0xc02069ae
SIOCGETPFLOW = 0xc02069fe
SIOCGETPFSYNC = 0xc02069f8
SIOCGETSGCNT = 0xc0147534
SIOCGETVIFCNT = 0xc0147533
SIOCGETVLAN = 0xc0206990
- SIOCGHIWAT = 0x40047301
SIOCGIFADDR = 0xc0206921
- SIOCGIFASYNCMAP = 0xc020697c
SIOCGIFBRDADDR = 0xc0206923
SIOCGIFCONF = 0xc0086924
SIOCGIFDATA = 0xc020691b
@@ -1168,41 +1333,53 @@ const (
SIOCGIFFLAGS = 0xc0206911
SIOCGIFGATTR = 0xc024698b
SIOCGIFGENERIC = 0xc020693a
+ SIOCGIFGLIST = 0xc024698d
SIOCGIFGMEMB = 0xc024698a
SIOCGIFGROUP = 0xc0246988
SIOCGIFHARDMTU = 0xc02069a5
- SIOCGIFMEDIA = 0xc0286936
+ SIOCGIFLLPRIO = 0xc02069b6
+ SIOCGIFMEDIA = 0xc0386938
SIOCGIFMETRIC = 0xc0206917
SIOCGIFMTU = 0xc020697e
SIOCGIFNETMASK = 0xc0206925
- SIOCGIFPDSTADDR = 0xc0206948
+ SIOCGIFPAIR = 0xc02069b1
+ SIOCGIFPARENT = 0xc02069b3
SIOCGIFPRIORITY = 0xc020699c
- SIOCGIFPSRCADDR = 0xc0206947
SIOCGIFRDOMAIN = 0xc02069a0
SIOCGIFRTLABEL = 0xc0206983
SIOCGIFRXR = 0x802069aa
- SIOCGIFTIMESLOT = 0xc0206986
+ SIOCGIFSFFPAGE = 0xc1126939
SIOCGIFXFLAGS = 0xc020699e
- SIOCGLIFADDR = 0xc218691d
SIOCGLIFPHYADDR = 0xc218694b
+ SIOCGLIFPHYDF = 0xc02069c2
+ SIOCGLIFPHYECN = 0xc02069c8
SIOCGLIFPHYRTABLE = 0xc02069a2
SIOCGLIFPHYTTL = 0xc02069a9
- SIOCGLOWAT = 0x40047303
SIOCGPGRP = 0x40047309
+ SIOCGPWE3 = 0xc0206998
+ SIOCGPWE3CTRLWORD = 0xc02069dc
+ SIOCGPWE3FAT = 0xc02069dd
+ SIOCGPWE3NEIGHBOR = 0xc21869de
+ SIOCGRXHPRIO = 0xc02069db
SIOCGSPPPPARAMS = 0xc0206994
+ SIOCGTXHPRIO = 0xc02069c6
+ SIOCGUMBINFO = 0xc02069be
+ SIOCGUMBPARAM = 0xc02069c0
SIOCGVH = 0xc02069f6
+ SIOCGVNETFLOWID = 0xc02069c4
SIOCGVNETID = 0xc02069a7
+ SIOCIFAFATTACH = 0x801169ab
+ SIOCIFAFDETACH = 0x801169ac
SIOCIFCREATE = 0x8020697a
SIOCIFDESTROY = 0x80206979
SIOCIFGCLONERS = 0xc00c6978
SIOCSETKALIVE = 0x801869a3
SIOCSETLABEL = 0x80206999
+ SIOCSETMPWCFG = 0x802069ad
SIOCSETPFLOW = 0x802069fd
SIOCSETPFSYNC = 0x802069f7
SIOCSETVLAN = 0x8020698f
- SIOCSHIWAT = 0x80047300
SIOCSIFADDR = 0x8020690c
- SIOCSIFASYNCMAP = 0x8020697d
SIOCSIFBRDADDR = 0x80206913
SIOCSIFDESCR = 0x80206980
SIOCSIFDSTADDR = 0x8020690e
@@ -1210,26 +1387,36 @@ const (
SIOCSIFGATTR = 0x8024698c
SIOCSIFGENERIC = 0x80206939
SIOCSIFLLADDR = 0x8020691f
- SIOCSIFMEDIA = 0xc0206935
+ SIOCSIFLLPRIO = 0x802069b5
+ SIOCSIFMEDIA = 0xc0206937
SIOCSIFMETRIC = 0x80206918
SIOCSIFMTU = 0x8020697f
SIOCSIFNETMASK = 0x80206916
- SIOCSIFPHYADDR = 0x80406946
+ SIOCSIFPAIR = 0x802069b0
+ SIOCSIFPARENT = 0x802069b2
SIOCSIFPRIORITY = 0x8020699b
SIOCSIFRDOMAIN = 0x8020699f
SIOCSIFRTLABEL = 0x80206982
- SIOCSIFTIMESLOT = 0x80206985
SIOCSIFXFLAGS = 0x8020699d
SIOCSLIFPHYADDR = 0x8218694a
+ SIOCSLIFPHYDF = 0x802069c1
+ SIOCSLIFPHYECN = 0x802069c7
SIOCSLIFPHYRTABLE = 0x802069a1
SIOCSLIFPHYTTL = 0x802069a8
- SIOCSLOWAT = 0x80047302
SIOCSPGRP = 0x80047308
+ SIOCSPWE3CTRLWORD = 0x802069dc
+ SIOCSPWE3FAT = 0x802069dd
+ SIOCSPWE3NEIGHBOR = 0x821869de
+ SIOCSRXHPRIO = 0x802069db
SIOCSSPPPPARAMS = 0x80206993
+ SIOCSTXHPRIO = 0x802069c5
+ SIOCSUMBPARAM = 0x802069bf
SIOCSVH = 0xc02069f5
+ SIOCSVNETFLOWID = 0x802069c3
SIOCSVNETID = 0x802069a6
SOCK_CLOEXEC = 0x8000
SOCK_DGRAM = 0x2
+ SOCK_DNS = 0x1000
SOCK_NONBLOCK = 0x4000
SOCK_RAW = 0x3
SOCK_RDM = 0x4
@@ -1241,6 +1428,7 @@ const (
SO_BINDANY = 0x1000
SO_BROADCAST = 0x20
SO_DEBUG = 0x1
+ SO_DOMAIN = 0x1024
SO_DONTROUTE = 0x10
SO_ERROR = 0x1007
SO_KEEPALIVE = 0x8
@@ -1248,6 +1436,7 @@ const (
SO_NETPROC = 0x1020
SO_OOBINLINE = 0x100
SO_PEERCRED = 0x1022
+ SO_PROTOCOL = 0x1025
SO_RCVBUF = 0x1002
SO_RCVLOWAT = 0x1004
SO_RCVTIMEO = 0x1006
@@ -1261,6 +1450,7 @@ const (
SO_TIMESTAMP = 0x800
SO_TYPE = 0x1008
SO_USELOOPBACK = 0x40
+ SO_ZEROIZE = 0x2000
S_BLKSIZE = 0x200
S_IEXEC = 0x40
S_IFBLK = 0x6000
@@ -1290,9 +1480,24 @@ const (
S_IXOTH = 0x1
S_IXUSR = 0x40
TCIFLUSH = 0x1
+ TCIOFF = 0x3
TCIOFLUSH = 0x3
+ TCION = 0x4
TCOFLUSH = 0x2
- TCP_MAXBURST = 0x4
+ TCOOFF = 0x1
+ TCOON = 0x2
+ TCPOPT_EOL = 0x0
+ TCPOPT_MAXSEG = 0x2
+ TCPOPT_NOP = 0x1
+ TCPOPT_SACK = 0x5
+ TCPOPT_SACK_HDR = 0x1010500
+ TCPOPT_SACK_PERMITTED = 0x4
+ TCPOPT_SACK_PERMIT_HDR = 0x1010402
+ TCPOPT_SIGNATURE = 0x13
+ TCPOPT_TIMESTAMP = 0x8
+ TCPOPT_TSTAMP_HDR = 0x101080a
+ TCPOPT_WINDOW = 0x3
+ TCP_INFO = 0x9
TCP_MAXSEG = 0x2
TCP_MAXWIN = 0xffff
TCP_MAX_SACK = 0x3
@@ -1301,11 +1506,15 @@ const (
TCP_MSS = 0x200
TCP_NODELAY = 0x1
TCP_NOPUSH = 0x10
- TCP_NSTATES = 0xb
+ TCP_SACKHOLE_LIMIT = 0x80
TCP_SACK_ENABLE = 0x8
TCSAFLUSH = 0x2
+ TIMER_ABSTIME = 0x1
+ TIMER_RELTIME = 0x0
TIOCCBRK = 0x2000747a
TIOCCDTR = 0x20007478
+ TIOCCHKVERAUTH = 0x2000741e
+ TIOCCLRVERAUTH = 0x2000741d
TIOCCONS = 0x80047462
TIOCDRAIN = 0x2000745e
TIOCEXCL = 0x2000740d
@@ -1321,7 +1530,7 @@ const (
TIOCGFLAGS = 0x4004745d
TIOCGPGRP = 0x40047477
TIOCGSID = 0x40047463
- TIOCGTSTAMP = 0x400c745b
+ TIOCGTSTAMP = 0x4010745b
TIOCGWINSZ = 0x40087468
TIOCMBIC = 0x8004746b
TIOCMBIS = 0x8004746c
@@ -1360,17 +1569,21 @@ const (
TIOCSETAF = 0x802c7416
TIOCSETAW = 0x802c7415
TIOCSETD = 0x8004741b
+ TIOCSETVERAUTH = 0x8004741c
TIOCSFLAGS = 0x8004745c
TIOCSIG = 0x8004745f
TIOCSPGRP = 0x80047476
TIOCSTART = 0x2000746e
- TIOCSTAT = 0x80047465
- TIOCSTI = 0x80017472
+ TIOCSTAT = 0x20007465
TIOCSTOP = 0x2000746f
TIOCSTSTAMP = 0x8008745a
TIOCSWINSZ = 0x80087467
TIOCUCNTL = 0x80047466
+ TIOCUCNTL_CBRK = 0x7a
+ TIOCUCNTL_SBRK = 0x7b
TOSTOP = 0x400000
+ UTIME_NOW = -0x2
+ UTIME_OMIT = -0x1
VDISCARD = 0xf
VDSUSP = 0xb
VEOF = 0x0
@@ -1381,6 +1594,19 @@ const (
VKILL = 0x5
VLNEXT = 0xe
VMIN = 0x10
+ VM_ANONMIN = 0x7
+ VM_LOADAVG = 0x2
+ VM_MALLOC_CONF = 0xc
+ VM_MAXID = 0xd
+ VM_MAXSLP = 0xa
+ VM_METER = 0x1
+ VM_NKMEMPAGES = 0x6
+ VM_PSSTRINGS = 0x3
+ VM_SWAPENCRYPT = 0x5
+ VM_USPACE = 0xb
+ VM_UVMEXP = 0x4
+ VM_VNODEMIN = 0x9
+ VM_VTEXTMIN = 0x8
VQUIT = 0x9
VREPRINT = 0x6
VSTART = 0xc
@@ -1394,6 +1620,7 @@ const (
WCOREFLAG = 0x80
WNOHANG = 0x1
WUNTRACED = 0x2
+ XCASE = 0x1000000
)
// Errors
@@ -1407,6 +1634,7 @@ const (
EALREADY = syscall.Errno(0x25)
EAUTH = syscall.Errno(0x50)
EBADF = syscall.Errno(0x9)
+ EBADMSG = syscall.Errno(0x5c)
EBADRPC = syscall.Errno(0x48)
EBUSY = syscall.Errno(0x10)
ECANCELED = syscall.Errno(0x58)
@@ -1433,7 +1661,7 @@ const (
EIPSEC = syscall.Errno(0x52)
EISCONN = syscall.Errno(0x38)
EISDIR = syscall.Errno(0x15)
- ELAST = syscall.Errno(0x5b)
+ ELAST = syscall.Errno(0x5f)
ELOOP = syscall.Errno(0x3e)
EMEDIUMTYPE = syscall.Errno(0x56)
EMFILE = syscall.Errno(0x18)
@@ -1461,12 +1689,14 @@ const (
ENOTCONN = syscall.Errno(0x39)
ENOTDIR = syscall.Errno(0x14)
ENOTEMPTY = syscall.Errno(0x42)
+ ENOTRECOVERABLE = syscall.Errno(0x5d)
ENOTSOCK = syscall.Errno(0x26)
ENOTSUP = syscall.Errno(0x5b)
ENOTTY = syscall.Errno(0x19)
ENXIO = syscall.Errno(0x6)
EOPNOTSUPP = syscall.Errno(0x2d)
EOVERFLOW = syscall.Errno(0x57)
+ EOWNERDEAD = syscall.Errno(0x5e)
EPERM = syscall.Errno(0x1)
EPFNOSUPPORT = syscall.Errno(0x2e)
EPIPE = syscall.Errno(0x20)
@@ -1474,6 +1704,7 @@ const (
EPROCUNAVAIL = syscall.Errno(0x4c)
EPROGMISMATCH = syscall.Errno(0x4b)
EPROGUNAVAIL = syscall.Errno(0x4a)
+ EPROTO = syscall.Errno(0x5f)
EPROTONOSUPPORT = syscall.Errno(0x2b)
EPROTOTYPE = syscall.Errno(0x29)
ERANGE = syscall.Errno(0x22)
@@ -1570,7 +1801,7 @@ var errorList = [...]struct {
{32, "EPIPE", "broken pipe"},
{33, "EDOM", "numerical argument out of domain"},
{34, "ERANGE", "result too large"},
- {35, "EWOULDBLOCK", "resource temporarily unavailable"},
+ {35, "EAGAIN", "resource temporarily unavailable"},
{36, "EINPROGRESS", "operation now in progress"},
{37, "EALREADY", "operation already in progress"},
{38, "ENOTSOCK", "socket operation on non-socket"},
@@ -1626,7 +1857,11 @@ var errorList = [...]struct {
{88, "ECANCELED", "operation canceled"},
{89, "EIDRM", "identifier removed"},
{90, "ENOMSG", "no message of desired type"},
- {91, "ELAST", "not supported"},
+ {91, "ENOTSUP", "not supported"},
+ {92, "EBADMSG", "bad message"},
+ {93, "ENOTRECOVERABLE", "state not recoverable"},
+ {94, "EOWNERDEAD", "previous owner died"},
+ {95, "ELAST", "protocol error"},
}
// Signal table
@@ -1640,7 +1875,7 @@ var signalList = [...]struct {
{3, "SIGQUIT", "quit"},
{4, "SIGILL", "illegal instruction"},
{5, "SIGTRAP", "trace/BPT trap"},
- {6, "SIGABRT", "abort trap"},
+ {6, "SIGIOT", "abort trap"},
{7, "SIGEMT", "EMT trap"},
{8, "SIGFPE", "floating point exception"},
{9, "SIGKILL", "killed"},
@@ -1667,4 +1902,5 @@ var signalList = [...]struct {
{30, "SIGUSR1", "user defined signal 1"},
{31, "SIGUSR2", "user defined signal 2"},
{32, "SIGTHR", "thread AST"},
+ {28672, "SIGSTKSZ", "unknown signal"},
}
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go
index 90de7dfc33a3..ae16fe7542ae 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go
@@ -112,6 +112,12 @@ const (
BPF_FILDROP_CAPTURE = 0x1
BPF_FILDROP_DROP = 0x2
BPF_FILDROP_PASS = 0x0
+ BPF_F_DIR_IN = 0x10
+ BPF_F_DIR_MASK = 0x30
+ BPF_F_DIR_OUT = 0x20
+ BPF_F_DIR_SHIFT = 0x4
+ BPF_F_FLOWID = 0x8
+ BPF_F_PRI_MASK = 0x7
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
@@ -140,6 +146,7 @@ const (
BPF_OR = 0x40
BPF_RELEASE = 0x30bb6
BPF_RET = 0x6
+ BPF_RND = 0xc0
BPF_RSH = 0x70
BPF_ST = 0x2
BPF_STX = 0x3
@@ -180,7 +187,65 @@ const (
CTL_KERN = 0x1
CTL_MAXNAME = 0xc
CTL_NET = 0x4
+ DIOCADDQUEUE = 0xc110445d
+ DIOCADDRULE = 0xcd604404
+ DIOCADDSTATE = 0xc1084425
+ DIOCCHANGERULE = 0xcd60441a
+ DIOCCLRIFFLAG = 0xc028445a
+ DIOCCLRSRCNODES = 0x20004455
+ DIOCCLRSTATES = 0xc0e04412
+ DIOCCLRSTATUS = 0xc0284416
+ DIOCGETLIMIT = 0xc0084427
+ DIOCGETQSTATS = 0xc1204460
+ DIOCGETQUEUE = 0xc110445f
+ DIOCGETQUEUES = 0xc110445e
+ DIOCGETRULE = 0xcd604407
+ DIOCGETRULES = 0xcd604406
+ DIOCGETRULESET = 0xc444443b
+ DIOCGETRULESETS = 0xc444443a
+ DIOCGETSRCNODES = 0xc0104454
+ DIOCGETSTATE = 0xc1084413
+ DIOCGETSTATES = 0xc0104419
+ DIOCGETSTATUS = 0xc1e84415
+ DIOCGETSYNFLWATS = 0xc0084463
+ DIOCGETTIMEOUT = 0xc008441e
+ DIOCIGETIFACES = 0xc0284457
+ DIOCKILLSRCNODES = 0xc080445b
+ DIOCKILLSTATES = 0xc0e04429
+ DIOCNATLOOK = 0xc0504417
+ DIOCOSFPADD = 0xc088444f
DIOCOSFPFLUSH = 0x2000444e
+ DIOCOSFPGET = 0xc0884450
+ DIOCRADDADDRS = 0xc4504443
+ DIOCRADDTABLES = 0xc450443d
+ DIOCRCLRADDRS = 0xc4504442
+ DIOCRCLRASTATS = 0xc4504448
+ DIOCRCLRTABLES = 0xc450443c
+ DIOCRCLRTSTATS = 0xc4504441
+ DIOCRDELADDRS = 0xc4504444
+ DIOCRDELTABLES = 0xc450443e
+ DIOCRGETADDRS = 0xc4504446
+ DIOCRGETASTATS = 0xc4504447
+ DIOCRGETTABLES = 0xc450443f
+ DIOCRGETTSTATS = 0xc4504440
+ DIOCRINADEFINE = 0xc450444d
+ DIOCRSETADDRS = 0xc4504445
+ DIOCRSETTFLAGS = 0xc450444a
+ DIOCRTSTADDRS = 0xc4504449
+ DIOCSETDEBUG = 0xc0044418
+ DIOCSETHOSTID = 0xc0044456
+ DIOCSETIFFLAG = 0xc0284459
+ DIOCSETLIMIT = 0xc0084428
+ DIOCSETREASS = 0xc004445c
+ DIOCSETSTATUSIF = 0xc0284414
+ DIOCSETSYNCOOKIES = 0xc0014462
+ DIOCSETSYNFLWATS = 0xc0084461
+ DIOCSETTIMEOUT = 0xc008441d
+ DIOCSTART = 0x20004401
+ DIOCSTOP = 0x20004402
+ DIOCXBEGIN = 0xc0104451
+ DIOCXCOMMIT = 0xc0104452
+ DIOCXROLLBACK = 0xc0104453
DLT_ARCNET = 0x7
DLT_ATM_RFC1483 = 0xb
DLT_AX25 = 0x3
@@ -243,6 +308,8 @@ const (
EMUL_ENABLED = 0x1
EMUL_NATIVE = 0x2
ENDRUNDISC = 0x9
+ ETH64_8021_RSVD_MASK = 0xfffffffffff0
+ ETH64_8021_RSVD_PREFIX = 0x180c2000000
ETHERMIN = 0x2e
ETHERMTU = 0x5dc
ETHERTYPE_8023 = 0x4
@@ -295,6 +362,7 @@ const (
ETHERTYPE_DN = 0x6003
ETHERTYPE_DOGFIGHT = 0x1989
ETHERTYPE_DSMD = 0x8039
+ ETHERTYPE_EAPOL = 0x888e
ETHERTYPE_ECMA = 0x803
ETHERTYPE_ENCRYPT = 0x803d
ETHERTYPE_ES = 0x805d
@@ -326,6 +394,7 @@ const (
ETHERTYPE_LLDP = 0x88cc
ETHERTYPE_LOGICRAFT = 0x8148
ETHERTYPE_LOOPBACK = 0x9000
+ ETHERTYPE_MACSEC = 0x88e5
ETHERTYPE_MATRA = 0x807a
ETHERTYPE_MAX = 0xffff
ETHERTYPE_MERIT = 0x807c
@@ -354,15 +423,16 @@ const (
ETHERTYPE_NCD = 0x8149
ETHERTYPE_NESTAR = 0x8006
ETHERTYPE_NETBEUI = 0x8191
+ ETHERTYPE_NHRP = 0x2001
ETHERTYPE_NOVELL = 0x8138
ETHERTYPE_NS = 0x600
ETHERTYPE_NSAT = 0x601
ETHERTYPE_NSCOMPAT = 0x807
+ ETHERTYPE_NSH = 0x984f
ETHERTYPE_NTRAILER = 0x10
ETHERTYPE_OS9 = 0x7007
ETHERTYPE_OS9NET = 0x7009
ETHERTYPE_PACER = 0x80c6
- ETHERTYPE_PAE = 0x888e
ETHERTYPE_PBB = 0x88e7
ETHERTYPE_PCS = 0x4242
ETHERTYPE_PLANNING = 0x8044
@@ -445,10 +515,11 @@ const (
ETHER_VLAN_ENCAP_LEN = 0x4
EVFILT_AIO = -0x3
EVFILT_DEVICE = -0x8
+ EVFILT_EXCEPT = -0x9
EVFILT_PROC = -0x5
EVFILT_READ = -0x1
EVFILT_SIGNAL = -0x6
- EVFILT_SYSCOUNT = 0x8
+ EVFILT_SYSCOUNT = 0x9
EVFILT_TIMER = -0x7
EVFILT_VNODE = -0x4
EVFILT_WRITE = -0x2
@@ -470,7 +541,7 @@ const (
EV_FLAG1 = 0x2000
EV_ONESHOT = 0x10
EV_RECEIPT = 0x40
- EV_SYSFLAGS = 0xf000
+ EV_SYSFLAGS = 0xf800
EXTA = 0x4b00
EXTB = 0x9600
EXTPROC = 0x800
@@ -736,6 +807,7 @@ const (
IFT_VOICEOVERCABLE = 0xc6
IFT_VOICEOVERFRAMERELAY = 0x99
IFT_VOICEOVERIP = 0x68
+ IFT_WIREGUARD = 0xfb
IFT_X213 = 0x5d
IFT_X25 = 0x5
IFT_X25DDN = 0x4
@@ -801,9 +873,11 @@ const (
IPPROTO_RAW = 0xff
IPPROTO_ROUTING = 0x2b
IPPROTO_RSVP = 0x2e
+ IPPROTO_SCTP = 0x84
IPPROTO_TCP = 0x6
IPPROTO_TP = 0x1d
IPPROTO_UDP = 0x11
+ IPPROTO_UDPLITE = 0x88
IPV6_AUTH_LEVEL = 0x35
IPV6_AUTOFLOWLABEL = 0x3b
IPV6_CHECKSUM = 0x1a
@@ -910,6 +984,9 @@ const (
IP_TTL = 0x4
ISIG = 0x80
ISTRIP = 0x20
+ ITIMER_PROF = 0x2
+ ITIMER_REAL = 0x0
+ ITIMER_VIRTUAL = 0x1
IUCLC = 0x1000
IXANY = 0x800
IXOFF = 0x400
@@ -981,6 +1058,19 @@ const (
MNT_WAIT = 0x1
MNT_WANTRDWR = 0x2000000
MNT_WXALLOWED = 0x800
+ MOUNT_AFS = "afs"
+ MOUNT_CD9660 = "cd9660"
+ MOUNT_EXT2FS = "ext2fs"
+ MOUNT_FFS = "ffs"
+ MOUNT_FUSEFS = "fuse"
+ MOUNT_MFS = "mfs"
+ MOUNT_MSDOS = "msdos"
+ MOUNT_NCPFS = "ncpfs"
+ MOUNT_NFS = "nfs"
+ MOUNT_NTFS = "ntfs"
+ MOUNT_TMPFS = "tmpfs"
+ MOUNT_UDF = "udf"
+ MOUNT_UFS = "ffs"
MSG_BCAST = 0x100
MSG_CMSG_CLOEXEC = 0x800
MSG_CTRUNC = 0x20
@@ -993,6 +1083,7 @@ const (
MSG_PEEK = 0x2
MSG_TRUNC = 0x10
MSG_WAITALL = 0x40
+ MSG_WAITFORONE = 0x1000
MS_ASYNC = 0x1
MS_INVALIDATE = 0x4
MS_SYNC = 0x2
@@ -1001,7 +1092,8 @@ const (
NET_RT_FLAGS = 0x2
NET_RT_IFLIST = 0x3
NET_RT_IFNAMES = 0x6
- NET_RT_MAXID = 0x7
+ NET_RT_MAXID = 0x8
+ NET_RT_SOURCE = 0x7
NET_RT_STATS = 0x4
NET_RT_TABLE = 0x5
NFDBITS = 0x20
@@ -1018,6 +1110,7 @@ const (
NOTE_FORK = 0x40000000
NOTE_LINK = 0x10
NOTE_LOWAT = 0x1
+ NOTE_OOB = 0x4
NOTE_PCTRLMASK = 0xf0000000
NOTE_PDATAMASK = 0xfffff
NOTE_RENAME = 0x20
@@ -1154,7 +1247,7 @@ const (
RTM_PROPOSAL = 0x13
RTM_REDIRECT = 0x6
RTM_RESOLVE = 0xb
- RTM_RTTUNIT = 0xf4240
+ RTM_SOURCE = 0x16
RTM_VERSION = 0x5
RTV_EXPIRE = 0x4
RTV_HOPCOUNT = 0x2
@@ -1172,6 +1265,9 @@ const (
RUSAGE_THREAD = 0x1
SCM_RIGHTS = 0x1
SCM_TIMESTAMP = 0x4
+ SEEK_CUR = 0x1
+ SEEK_END = 0x2
+ SEEK_SET = 0x0
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
@@ -1188,30 +1284,30 @@ const (
SIOCBRDGDELS = 0x80606942
SIOCBRDGFLUSH = 0x80606948
SIOCBRDGFRL = 0x808c694e
- SIOCBRDGGCACHE = 0xc0186941
- SIOCBRDGGFD = 0xc0186952
- SIOCBRDGGHT = 0xc0186951
+ SIOCBRDGGCACHE = 0xc0146941
+ SIOCBRDGGFD = 0xc0146952
+ SIOCBRDGGHT = 0xc0146951
SIOCBRDGGIFFLGS = 0xc060693e
- SIOCBRDGGMA = 0xc0186953
+ SIOCBRDGGMA = 0xc0146953
SIOCBRDGGPARAM = 0xc0406958
- SIOCBRDGGPRI = 0xc0186950
+ SIOCBRDGGPRI = 0xc0146950
SIOCBRDGGRL = 0xc030694f
- SIOCBRDGGTO = 0xc0186946
+ SIOCBRDGGTO = 0xc0146946
SIOCBRDGIFS = 0xc0606942
SIOCBRDGRTS = 0xc0206943
SIOCBRDGSADDR = 0xc1286944
- SIOCBRDGSCACHE = 0x80186940
- SIOCBRDGSFD = 0x80186952
- SIOCBRDGSHT = 0x80186951
+ SIOCBRDGSCACHE = 0x80146940
+ SIOCBRDGSFD = 0x80146952
+ SIOCBRDGSHT = 0x80146951
SIOCBRDGSIFCOST = 0x80606955
SIOCBRDGSIFFLGS = 0x8060693f
SIOCBRDGSIFPRIO = 0x80606954
SIOCBRDGSIFPROT = 0x8060694a
- SIOCBRDGSMA = 0x80186953
- SIOCBRDGSPRI = 0x80186950
- SIOCBRDGSPROTO = 0x8018695a
- SIOCBRDGSTO = 0x80186945
- SIOCBRDGSTXHC = 0x80186959
+ SIOCBRDGSMA = 0x80146953
+ SIOCBRDGSPRI = 0x80146950
+ SIOCBRDGSPROTO = 0x8014695a
+ SIOCBRDGSTO = 0x80146945
+ SIOCBRDGSTXHC = 0x80146959
SIOCDELLABEL = 0x80206997
SIOCDELMULTI = 0x80206932
SIOCDIFADDR = 0x80206919
@@ -1264,6 +1360,7 @@ const (
SIOCGPWE3CTRLWORD = 0xc02069dc
SIOCGPWE3FAT = 0xc02069dd
SIOCGPWE3NEIGHBOR = 0xc21869de
+ SIOCGRXHPRIO = 0xc02069db
SIOCGSPPPPARAMS = 0xc0206994
SIOCGTXHPRIO = 0xc02069c6
SIOCGUMBINFO = 0xc02069be
@@ -1310,17 +1407,13 @@ const (
SIOCSPWE3CTRLWORD = 0x802069dc
SIOCSPWE3FAT = 0x802069dd
SIOCSPWE3NEIGHBOR = 0x821869de
+ SIOCSRXHPRIO = 0x802069db
SIOCSSPPPPARAMS = 0x80206993
SIOCSTXHPRIO = 0x802069c5
SIOCSUMBPARAM = 0x802069bf
SIOCSVH = 0xc02069f5
SIOCSVNETFLOWID = 0x802069c3
SIOCSVNETID = 0x802069a6
- SIOCSWGDPID = 0xc018695b
- SIOCSWGMAXFLOW = 0xc0186960
- SIOCSWGMAXGROUP = 0xc018695d
- SIOCSWSDPID = 0x8018695c
- SIOCSWSPORTNO = 0xc060695f
SOCK_CLOEXEC = 0x8000
SOCK_DGRAM = 0x2
SOCK_DNS = 0x1000
@@ -1335,6 +1428,7 @@ const (
SO_BINDANY = 0x1000
SO_BROADCAST = 0x20
SO_DEBUG = 0x1
+ SO_DOMAIN = 0x1024
SO_DONTROUTE = 0x10
SO_ERROR = 0x1007
SO_KEEPALIVE = 0x8
@@ -1342,6 +1436,7 @@ const (
SO_NETPROC = 0x1020
SO_OOBINLINE = 0x100
SO_PEERCRED = 0x1022
+ SO_PROTOCOL = 0x1025
SO_RCVBUF = 0x1002
SO_RCVLOWAT = 0x1004
SO_RCVTIMEO = 0x1006
@@ -1391,7 +1486,18 @@ const (
TCOFLUSH = 0x2
TCOOFF = 0x1
TCOON = 0x2
- TCP_MAXBURST = 0x4
+ TCPOPT_EOL = 0x0
+ TCPOPT_MAXSEG = 0x2
+ TCPOPT_NOP = 0x1
+ TCPOPT_SACK = 0x5
+ TCPOPT_SACK_HDR = 0x1010500
+ TCPOPT_SACK_PERMITTED = 0x4
+ TCPOPT_SACK_PERMIT_HDR = 0x1010402
+ TCPOPT_SIGNATURE = 0x13
+ TCPOPT_TIMESTAMP = 0x8
+ TCPOPT_TSTAMP_HDR = 0x101080a
+ TCPOPT_WINDOW = 0x3
+ TCP_INFO = 0x9
TCP_MAXSEG = 0x2
TCP_MAXWIN = 0xffff
TCP_MAX_SACK = 0x3
@@ -1400,6 +1506,7 @@ const (
TCP_MSS = 0x200
TCP_NODELAY = 0x1
TCP_NOPUSH = 0x10
+ TCP_SACKHOLE_LIMIT = 0x80
TCP_SACK_ENABLE = 0x8
TCSAFLUSH = 0x2
TIMER_ABSTIME = 0x1
@@ -1768,7 +1875,7 @@ var signalList = [...]struct {
{3, "SIGQUIT", "quit"},
{4, "SIGILL", "illegal instruction"},
{5, "SIGTRAP", "trace/BPT trap"},
- {6, "SIGABRT", "abort trap"},
+ {6, "SIGIOT", "abort trap"},
{7, "SIGEMT", "EMT trap"},
{8, "SIGFPE", "floating point exception"},
{9, "SIGKILL", "killed"},
@@ -1795,4 +1902,5 @@ var signalList = [...]struct {
{30, "SIGUSR1", "user defined signal 1"},
{31, "SIGUSR2", "user defined signal 2"},
{32, "SIGTHR", "thread AST"},
+ {28672, "SIGSTKSZ", "unknown signal"},
}
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go
index f1154ff56f6c..03d90fe35501 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go
@@ -112,6 +112,12 @@ const (
BPF_FILDROP_CAPTURE = 0x1
BPF_FILDROP_DROP = 0x2
BPF_FILDROP_PASS = 0x0
+ BPF_F_DIR_IN = 0x10
+ BPF_F_DIR_MASK = 0x30
+ BPF_F_DIR_OUT = 0x20
+ BPF_F_DIR_SHIFT = 0x4
+ BPF_F_FLOWID = 0x8
+ BPF_F_PRI_MASK = 0x7
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
@@ -140,6 +146,7 @@ const (
BPF_OR = 0x40
BPF_RELEASE = 0x30bb6
BPF_RET = 0x6
+ BPF_RND = 0xc0
BPF_RSH = 0x70
BPF_ST = 0x2
BPF_STX = 0x3
@@ -301,6 +308,8 @@ const (
EMUL_ENABLED = 0x1
EMUL_NATIVE = 0x2
ENDRUNDISC = 0x9
+ ETH64_8021_RSVD_MASK = 0xfffffffffff0
+ ETH64_8021_RSVD_PREFIX = 0x180c2000000
ETHERMIN = 0x2e
ETHERMTU = 0x5dc
ETHERTYPE_8023 = 0x4
@@ -353,6 +362,7 @@ const (
ETHERTYPE_DN = 0x6003
ETHERTYPE_DOGFIGHT = 0x1989
ETHERTYPE_DSMD = 0x8039
+ ETHERTYPE_EAPOL = 0x888e
ETHERTYPE_ECMA = 0x803
ETHERTYPE_ENCRYPT = 0x803d
ETHERTYPE_ES = 0x805d
@@ -413,15 +423,16 @@ const (
ETHERTYPE_NCD = 0x8149
ETHERTYPE_NESTAR = 0x8006
ETHERTYPE_NETBEUI = 0x8191
+ ETHERTYPE_NHRP = 0x2001
ETHERTYPE_NOVELL = 0x8138
ETHERTYPE_NS = 0x600
ETHERTYPE_NSAT = 0x601
ETHERTYPE_NSCOMPAT = 0x807
+ ETHERTYPE_NSH = 0x984f
ETHERTYPE_NTRAILER = 0x10
ETHERTYPE_OS9 = 0x7007
ETHERTYPE_OS9NET = 0x7009
ETHERTYPE_PACER = 0x80c6
- ETHERTYPE_PAE = 0x888e
ETHERTYPE_PBB = 0x88e7
ETHERTYPE_PCS = 0x4242
ETHERTYPE_PLANNING = 0x8044
@@ -504,10 +515,11 @@ const (
ETHER_VLAN_ENCAP_LEN = 0x4
EVFILT_AIO = -0x3
EVFILT_DEVICE = -0x8
+ EVFILT_EXCEPT = -0x9
EVFILT_PROC = -0x5
EVFILT_READ = -0x1
EVFILT_SIGNAL = -0x6
- EVFILT_SYSCOUNT = 0x8
+ EVFILT_SYSCOUNT = 0x9
EVFILT_TIMER = -0x7
EVFILT_VNODE = -0x4
EVFILT_WRITE = -0x2
@@ -529,7 +541,7 @@ const (
EV_FLAG1 = 0x2000
EV_ONESHOT = 0x10
EV_RECEIPT = 0x40
- EV_SYSFLAGS = 0xf000
+ EV_SYSFLAGS = 0xf800
EXTA = 0x4b00
EXTB = 0x9600
EXTPROC = 0x800
@@ -795,6 +807,7 @@ const (
IFT_VOICEOVERCABLE = 0xc6
IFT_VOICEOVERFRAMERELAY = 0x99
IFT_VOICEOVERIP = 0x68
+ IFT_WIREGUARD = 0xfb
IFT_X213 = 0x5d
IFT_X25 = 0x5
IFT_X25DDN = 0x4
@@ -860,6 +873,7 @@ const (
IPPROTO_RAW = 0xff
IPPROTO_ROUTING = 0x2b
IPPROTO_RSVP = 0x2e
+ IPPROTO_SCTP = 0x84
IPPROTO_TCP = 0x6
IPPROTO_TP = 0x1d
IPPROTO_UDP = 0x11
@@ -970,6 +984,9 @@ const (
IP_TTL = 0x4
ISIG = 0x80
ISTRIP = 0x20
+ ITIMER_PROF = 0x2
+ ITIMER_REAL = 0x0
+ ITIMER_VIRTUAL = 0x1
IUCLC = 0x1000
IXANY = 0x800
IXOFF = 0x400
@@ -1041,6 +1058,19 @@ const (
MNT_WAIT = 0x1
MNT_WANTRDWR = 0x2000000
MNT_WXALLOWED = 0x800
+ MOUNT_AFS = "afs"
+ MOUNT_CD9660 = "cd9660"
+ MOUNT_EXT2FS = "ext2fs"
+ MOUNT_FFS = "ffs"
+ MOUNT_FUSEFS = "fuse"
+ MOUNT_MFS = "mfs"
+ MOUNT_MSDOS = "msdos"
+ MOUNT_NCPFS = "ncpfs"
+ MOUNT_NFS = "nfs"
+ MOUNT_NTFS = "ntfs"
+ MOUNT_TMPFS = "tmpfs"
+ MOUNT_UDF = "udf"
+ MOUNT_UFS = "ffs"
MSG_BCAST = 0x100
MSG_CMSG_CLOEXEC = 0x800
MSG_CTRUNC = 0x20
@@ -1053,6 +1083,7 @@ const (
MSG_PEEK = 0x2
MSG_TRUNC = 0x10
MSG_WAITALL = 0x40
+ MSG_WAITFORONE = 0x1000
MS_ASYNC = 0x1
MS_INVALIDATE = 0x4
MS_SYNC = 0x2
@@ -1061,7 +1092,8 @@ const (
NET_RT_FLAGS = 0x2
NET_RT_IFLIST = 0x3
NET_RT_IFNAMES = 0x6
- NET_RT_MAXID = 0x7
+ NET_RT_MAXID = 0x8
+ NET_RT_SOURCE = 0x7
NET_RT_STATS = 0x4
NET_RT_TABLE = 0x5
NFDBITS = 0x20
@@ -1078,6 +1110,7 @@ const (
NOTE_FORK = 0x40000000
NOTE_LINK = 0x10
NOTE_LOWAT = 0x1
+ NOTE_OOB = 0x4
NOTE_PCTRLMASK = 0xf0000000
NOTE_PDATAMASK = 0xfffff
NOTE_RENAME = 0x20
@@ -1214,7 +1247,7 @@ const (
RTM_PROPOSAL = 0x13
RTM_REDIRECT = 0x6
RTM_RESOLVE = 0xb
- RTM_RTTUNIT = 0xf4240
+ RTM_SOURCE = 0x16
RTM_VERSION = 0x5
RTV_EXPIRE = 0x4
RTV_HOPCOUNT = 0x2
@@ -1232,6 +1265,9 @@ const (
RUSAGE_THREAD = 0x1
SCM_RIGHTS = 0x1
SCM_TIMESTAMP = 0x4
+ SEEK_CUR = 0x1
+ SEEK_END = 0x2
+ SEEK_SET = 0x0
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
@@ -1248,30 +1284,30 @@ const (
SIOCBRDGDELS = 0x80606942
SIOCBRDGFLUSH = 0x80606948
SIOCBRDGFRL = 0x808c694e
- SIOCBRDGGCACHE = 0xc0186941
- SIOCBRDGGFD = 0xc0186952
- SIOCBRDGGHT = 0xc0186951
+ SIOCBRDGGCACHE = 0xc0146941
+ SIOCBRDGGFD = 0xc0146952
+ SIOCBRDGGHT = 0xc0146951
SIOCBRDGGIFFLGS = 0xc060693e
- SIOCBRDGGMA = 0xc0186953
+ SIOCBRDGGMA = 0xc0146953
SIOCBRDGGPARAM = 0xc0406958
- SIOCBRDGGPRI = 0xc0186950
+ SIOCBRDGGPRI = 0xc0146950
SIOCBRDGGRL = 0xc030694f
- SIOCBRDGGTO = 0xc0186946
+ SIOCBRDGGTO = 0xc0146946
SIOCBRDGIFS = 0xc0606942
SIOCBRDGRTS = 0xc0206943
SIOCBRDGSADDR = 0xc1286944
- SIOCBRDGSCACHE = 0x80186940
- SIOCBRDGSFD = 0x80186952
- SIOCBRDGSHT = 0x80186951
+ SIOCBRDGSCACHE = 0x80146940
+ SIOCBRDGSFD = 0x80146952
+ SIOCBRDGSHT = 0x80146951
SIOCBRDGSIFCOST = 0x80606955
SIOCBRDGSIFFLGS = 0x8060693f
SIOCBRDGSIFPRIO = 0x80606954
SIOCBRDGSIFPROT = 0x8060694a
- SIOCBRDGSMA = 0x80186953
- SIOCBRDGSPRI = 0x80186950
- SIOCBRDGSPROTO = 0x8018695a
- SIOCBRDGSTO = 0x80186945
- SIOCBRDGSTXHC = 0x80186959
+ SIOCBRDGSMA = 0x80146953
+ SIOCBRDGSPRI = 0x80146950
+ SIOCBRDGSPROTO = 0x8014695a
+ SIOCBRDGSTO = 0x80146945
+ SIOCBRDGSTXHC = 0x80146959
SIOCDELLABEL = 0x80206997
SIOCDELMULTI = 0x80206932
SIOCDIFADDR = 0x80206919
@@ -1378,11 +1414,6 @@ const (
SIOCSVH = 0xc02069f5
SIOCSVNETFLOWID = 0x802069c3
SIOCSVNETID = 0x802069a6
- SIOCSWGDPID = 0xc018695b
- SIOCSWGMAXFLOW = 0xc0186960
- SIOCSWGMAXGROUP = 0xc018695d
- SIOCSWSDPID = 0x8018695c
- SIOCSWSPORTNO = 0xc060695f
SOCK_CLOEXEC = 0x8000
SOCK_DGRAM = 0x2
SOCK_DNS = 0x1000
@@ -1455,7 +1486,18 @@ const (
TCOFLUSH = 0x2
TCOOFF = 0x1
TCOON = 0x2
- TCP_MAXBURST = 0x4
+ TCPOPT_EOL = 0x0
+ TCPOPT_MAXSEG = 0x2
+ TCPOPT_NOP = 0x1
+ TCPOPT_SACK = 0x5
+ TCPOPT_SACK_HDR = 0x1010500
+ TCPOPT_SACK_PERMITTED = 0x4
+ TCPOPT_SACK_PERMIT_HDR = 0x1010402
+ TCPOPT_SIGNATURE = 0x13
+ TCPOPT_TIMESTAMP = 0x8
+ TCPOPT_TSTAMP_HDR = 0x101080a
+ TCPOPT_WINDOW = 0x3
+ TCP_INFO = 0x9
TCP_MAXSEG = 0x2
TCP_MAXWIN = 0xffff
TCP_MAX_SACK = 0x3
@@ -1833,7 +1875,7 @@ var signalList = [...]struct {
{3, "SIGQUIT", "quit"},
{4, "SIGILL", "illegal instruction"},
{5, "SIGTRAP", "trace/BPT trap"},
- {6, "SIGABRT", "abort trap"},
+ {6, "SIGIOT", "abort trap"},
{7, "SIGEMT", "EMT trap"},
{8, "SIGFPE", "floating point exception"},
{9, "SIGKILL", "killed"},
@@ -1860,4 +1902,5 @@ var signalList = [...]struct {
{30, "SIGUSR1", "user defined signal 1"},
{31, "SIGUSR2", "user defined signal 2"},
{32, "SIGTHR", "thread AST"},
+ {81920, "SIGSTKSZ", "unknown signal"},
}
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go
index 1b6eedfa6115..54749f9c5ed7 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go
@@ -552,6 +552,16 @@ func Chroot(path string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockGettime(clockid int32, time *Timespec) (err error) {
+ _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go
index 039c4aa06c2c..77479d458155 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go
@@ -544,6 +544,16 @@ func Chroot(path string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockGettime(clockid int32, time *Timespec) (err error) {
+ _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
index 0535d3cfdf2b..2e966d4d7a6c 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
@@ -544,6 +544,16 @@ func Chroot(path string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockGettime(clockid int32, time *Timespec) (err error) {
+ _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
index 1018b5221704..d65a7c0fa6e9 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
@@ -544,6 +544,16 @@ func Chroot(path string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockGettime(clockid int32, time *Timespec) (err error) {
+ _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go
index 3802f4b379a5..6f0b97c6db3a 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go
@@ -544,6 +544,16 @@ func Chroot(path string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockGettime(clockid int32, time *Timespec) (err error) {
+ _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go
index 8a2db7da9f3e..e1c23b527236 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go
@@ -544,6 +544,16 @@ func Chroot(path string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockGettime(clockid int32, time *Timespec) (err error) {
+ _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
index 4af561a48d8c..79f7389963ec 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
@@ -521,6 +521,16 @@ func Chroot(path string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockGettime(clockid int32, time *Timespec) (err error) {
+ _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
index 3b90e9448add..fb161f3a2636 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
@@ -521,6 +521,16 @@ func Chroot(path string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockGettime(clockid int32, time *Timespec) (err error) {
+ _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
index 890f4ccd131c..4c8ac993a880 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
@@ -521,6 +521,16 @@ func Chroot(path string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockGettime(clockid int32, time *Timespec) (err error) {
+ _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go
index c79f071fc6a8..76dd8ec4fdb9 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go
@@ -521,6 +521,16 @@ func Chroot(path string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockGettime(clockid int32, time *Timespec) (err error) {
+ _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
index 2925fe0a7b73..caeb807bd4e8 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
@@ -696,6 +696,20 @@ var libc_chroot_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockGettime(clockid int32, time *Timespec) (err error) {
+ _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_clock_gettime_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0)
if e1 != 0 {
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s
index 75eb2f5f3f72..087444250c9a 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s
@@ -5,792 +5,665 @@
TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getgroups(SB)
-
GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $4
DATA ·libc_getgroups_trampoline_addr(SB)/4, $libc_getgroups_trampoline<>(SB)
TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setgroups(SB)
-
GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $4
DATA ·libc_setgroups_trampoline_addr(SB)/4, $libc_setgroups_trampoline<>(SB)
TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_wait4(SB)
-
GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $4
DATA ·libc_wait4_trampoline_addr(SB)/4, $libc_wait4_trampoline<>(SB)
TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_accept(SB)
-
GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $4
DATA ·libc_accept_trampoline_addr(SB)/4, $libc_accept_trampoline<>(SB)
TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_bind(SB)
-
GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $4
DATA ·libc_bind_trampoline_addr(SB)/4, $libc_bind_trampoline<>(SB)
TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_connect(SB)
-
GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $4
DATA ·libc_connect_trampoline_addr(SB)/4, $libc_connect_trampoline<>(SB)
TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_socket(SB)
-
GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $4
DATA ·libc_socket_trampoline_addr(SB)/4, $libc_socket_trampoline<>(SB)
TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getsockopt(SB)
-
GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $4
DATA ·libc_getsockopt_trampoline_addr(SB)/4, $libc_getsockopt_trampoline<>(SB)
TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setsockopt(SB)
-
GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $4
DATA ·libc_setsockopt_trampoline_addr(SB)/4, $libc_setsockopt_trampoline<>(SB)
TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpeername(SB)
-
GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $4
DATA ·libc_getpeername_trampoline_addr(SB)/4, $libc_getpeername_trampoline<>(SB)
TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getsockname(SB)
-
GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $4
DATA ·libc_getsockname_trampoline_addr(SB)/4, $libc_getsockname_trampoline<>(SB)
TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_shutdown(SB)
-
GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $4
DATA ·libc_shutdown_trampoline_addr(SB)/4, $libc_shutdown_trampoline<>(SB)
TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_socketpair(SB)
-
GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $4
DATA ·libc_socketpair_trampoline_addr(SB)/4, $libc_socketpair_trampoline<>(SB)
TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_recvfrom(SB)
-
GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $4
DATA ·libc_recvfrom_trampoline_addr(SB)/4, $libc_recvfrom_trampoline<>(SB)
TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendto(SB)
-
GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $4
DATA ·libc_sendto_trampoline_addr(SB)/4, $libc_sendto_trampoline<>(SB)
TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_recvmsg(SB)
-
GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $4
DATA ·libc_recvmsg_trampoline_addr(SB)/4, $libc_recvmsg_trampoline<>(SB)
TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendmsg(SB)
-
GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $4
DATA ·libc_sendmsg_trampoline_addr(SB)/4, $libc_sendmsg_trampoline<>(SB)
TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_kevent(SB)
-
GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $4
DATA ·libc_kevent_trampoline_addr(SB)/4, $libc_kevent_trampoline<>(SB)
TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_utimes(SB)
-
GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $4
DATA ·libc_utimes_trampoline_addr(SB)/4, $libc_utimes_trampoline<>(SB)
TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_futimes(SB)
-
GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $4
DATA ·libc_futimes_trampoline_addr(SB)/4, $libc_futimes_trampoline<>(SB)
TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_poll(SB)
-
GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $4
DATA ·libc_poll_trampoline_addr(SB)/4, $libc_poll_trampoline<>(SB)
TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_madvise(SB)
-
GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $4
DATA ·libc_madvise_trampoline_addr(SB)/4, $libc_madvise_trampoline<>(SB)
TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mlock(SB)
-
GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $4
DATA ·libc_mlock_trampoline_addr(SB)/4, $libc_mlock_trampoline<>(SB)
TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mlockall(SB)
-
GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $4
DATA ·libc_mlockall_trampoline_addr(SB)/4, $libc_mlockall_trampoline<>(SB)
TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mprotect(SB)
-
GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $4
DATA ·libc_mprotect_trampoline_addr(SB)/4, $libc_mprotect_trampoline<>(SB)
TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_msync(SB)
-
GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $4
DATA ·libc_msync_trampoline_addr(SB)/4, $libc_msync_trampoline<>(SB)
TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_munlock(SB)
-
GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $4
DATA ·libc_munlock_trampoline_addr(SB)/4, $libc_munlock_trampoline<>(SB)
TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_munlockall(SB)
-
GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $4
DATA ·libc_munlockall_trampoline_addr(SB)/4, $libc_munlockall_trampoline<>(SB)
TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pipe2(SB)
-
GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $4
DATA ·libc_pipe2_trampoline_addr(SB)/4, $libc_pipe2_trampoline<>(SB)
TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getdents(SB)
-
GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $4
DATA ·libc_getdents_trampoline_addr(SB)/4, $libc_getdents_trampoline<>(SB)
TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getcwd(SB)
-
GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4
DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB)
TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ioctl(SB)
-
GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $4
DATA ·libc_ioctl_trampoline_addr(SB)/4, $libc_ioctl_trampoline<>(SB)
TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sysctl(SB)
-
GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4
DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB)
TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ppoll(SB)
-
GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4
DATA ·libc_ppoll_trampoline_addr(SB)/4, $libc_ppoll_trampoline<>(SB)
TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_access(SB)
-
GLOBL ·libc_access_trampoline_addr(SB), RODATA, $4
DATA ·libc_access_trampoline_addr(SB)/4, $libc_access_trampoline<>(SB)
TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_adjtime(SB)
-
GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $4
DATA ·libc_adjtime_trampoline_addr(SB)/4, $libc_adjtime_trampoline<>(SB)
TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chdir(SB)
-
GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $4
DATA ·libc_chdir_trampoline_addr(SB)/4, $libc_chdir_trampoline<>(SB)
TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chflags(SB)
-
GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $4
DATA ·libc_chflags_trampoline_addr(SB)/4, $libc_chflags_trampoline<>(SB)
TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chmod(SB)
-
GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $4
DATA ·libc_chmod_trampoline_addr(SB)/4, $libc_chmod_trampoline<>(SB)
TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chown(SB)
-
GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $4
DATA ·libc_chown_trampoline_addr(SB)/4, $libc_chown_trampoline<>(SB)
TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chroot(SB)
-
GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $4
DATA ·libc_chroot_trampoline_addr(SB)/4, $libc_chroot_trampoline<>(SB)
+TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_clock_gettime(SB)
+GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $4
+DATA ·libc_clock_gettime_trampoline_addr(SB)/4, $libc_clock_gettime_trampoline<>(SB)
+
TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_close(SB)
-
GLOBL ·libc_close_trampoline_addr(SB), RODATA, $4
DATA ·libc_close_trampoline_addr(SB)/4, $libc_close_trampoline<>(SB)
TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_dup(SB)
-
GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $4
DATA ·libc_dup_trampoline_addr(SB)/4, $libc_dup_trampoline<>(SB)
TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_dup2(SB)
-
GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $4
DATA ·libc_dup2_trampoline_addr(SB)/4, $libc_dup2_trampoline<>(SB)
TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_dup3(SB)
-
GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $4
DATA ·libc_dup3_trampoline_addr(SB)/4, $libc_dup3_trampoline<>(SB)
TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_exit(SB)
-
GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $4
DATA ·libc_exit_trampoline_addr(SB)/4, $libc_exit_trampoline<>(SB)
TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_faccessat(SB)
-
GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $4
DATA ·libc_faccessat_trampoline_addr(SB)/4, $libc_faccessat_trampoline<>(SB)
TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchdir(SB)
-
GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $4
DATA ·libc_fchdir_trampoline_addr(SB)/4, $libc_fchdir_trampoline<>(SB)
TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchflags(SB)
-
GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $4
DATA ·libc_fchflags_trampoline_addr(SB)/4, $libc_fchflags_trampoline<>(SB)
TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchmod(SB)
-
GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $4
DATA ·libc_fchmod_trampoline_addr(SB)/4, $libc_fchmod_trampoline<>(SB)
TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchmodat(SB)
-
GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $4
DATA ·libc_fchmodat_trampoline_addr(SB)/4, $libc_fchmodat_trampoline<>(SB)
TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchown(SB)
-
GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $4
DATA ·libc_fchown_trampoline_addr(SB)/4, $libc_fchown_trampoline<>(SB)
TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchownat(SB)
-
GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $4
DATA ·libc_fchownat_trampoline_addr(SB)/4, $libc_fchownat_trampoline<>(SB)
TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_flock(SB)
-
GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $4
DATA ·libc_flock_trampoline_addr(SB)/4, $libc_flock_trampoline<>(SB)
TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fpathconf(SB)
-
GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $4
DATA ·libc_fpathconf_trampoline_addr(SB)/4, $libc_fpathconf_trampoline<>(SB)
TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstat(SB)
-
GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $4
DATA ·libc_fstat_trampoline_addr(SB)/4, $libc_fstat_trampoline<>(SB)
TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstatat(SB)
-
GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $4
DATA ·libc_fstatat_trampoline_addr(SB)/4, $libc_fstatat_trampoline<>(SB)
TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstatfs(SB)
-
GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $4
DATA ·libc_fstatfs_trampoline_addr(SB)/4, $libc_fstatfs_trampoline<>(SB)
TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fsync(SB)
-
GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $4
DATA ·libc_fsync_trampoline_addr(SB)/4, $libc_fsync_trampoline<>(SB)
TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ftruncate(SB)
-
GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $4
DATA ·libc_ftruncate_trampoline_addr(SB)/4, $libc_ftruncate_trampoline<>(SB)
TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getegid(SB)
-
GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $4
DATA ·libc_getegid_trampoline_addr(SB)/4, $libc_getegid_trampoline<>(SB)
TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_geteuid(SB)
-
GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $4
DATA ·libc_geteuid_trampoline_addr(SB)/4, $libc_geteuid_trampoline<>(SB)
TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getgid(SB)
-
GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $4
DATA ·libc_getgid_trampoline_addr(SB)/4, $libc_getgid_trampoline<>(SB)
TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpgid(SB)
-
GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $4
DATA ·libc_getpgid_trampoline_addr(SB)/4, $libc_getpgid_trampoline<>(SB)
TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpgrp(SB)
-
GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $4
DATA ·libc_getpgrp_trampoline_addr(SB)/4, $libc_getpgrp_trampoline<>(SB)
TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpid(SB)
-
GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $4
DATA ·libc_getpid_trampoline_addr(SB)/4, $libc_getpid_trampoline<>(SB)
TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getppid(SB)
-
GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $4
DATA ·libc_getppid_trampoline_addr(SB)/4, $libc_getppid_trampoline<>(SB)
TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpriority(SB)
-
GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $4
DATA ·libc_getpriority_trampoline_addr(SB)/4, $libc_getpriority_trampoline<>(SB)
TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getrlimit(SB)
-
GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $4
DATA ·libc_getrlimit_trampoline_addr(SB)/4, $libc_getrlimit_trampoline<>(SB)
TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getrtable(SB)
-
GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $4
DATA ·libc_getrtable_trampoline_addr(SB)/4, $libc_getrtable_trampoline<>(SB)
TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getrusage(SB)
-
GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $4
DATA ·libc_getrusage_trampoline_addr(SB)/4, $libc_getrusage_trampoline<>(SB)
TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getsid(SB)
-
GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $4
DATA ·libc_getsid_trampoline_addr(SB)/4, $libc_getsid_trampoline<>(SB)
TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_gettimeofday(SB)
-
GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $4
DATA ·libc_gettimeofday_trampoline_addr(SB)/4, $libc_gettimeofday_trampoline<>(SB)
TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getuid(SB)
-
GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $4
DATA ·libc_getuid_trampoline_addr(SB)/4, $libc_getuid_trampoline<>(SB)
TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_issetugid(SB)
-
GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $4
DATA ·libc_issetugid_trampoline_addr(SB)/4, $libc_issetugid_trampoline<>(SB)
TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_kill(SB)
-
GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $4
DATA ·libc_kill_trampoline_addr(SB)/4, $libc_kill_trampoline<>(SB)
TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_kqueue(SB)
-
GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $4
DATA ·libc_kqueue_trampoline_addr(SB)/4, $libc_kqueue_trampoline<>(SB)
TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_lchown(SB)
-
GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $4
DATA ·libc_lchown_trampoline_addr(SB)/4, $libc_lchown_trampoline<>(SB)
TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_link(SB)
-
GLOBL ·libc_link_trampoline_addr(SB), RODATA, $4
DATA ·libc_link_trampoline_addr(SB)/4, $libc_link_trampoline<>(SB)
TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_linkat(SB)
-
GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $4
DATA ·libc_linkat_trampoline_addr(SB)/4, $libc_linkat_trampoline<>(SB)
TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_listen(SB)
-
GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $4
DATA ·libc_listen_trampoline_addr(SB)/4, $libc_listen_trampoline<>(SB)
TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_lstat(SB)
-
GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $4
DATA ·libc_lstat_trampoline_addr(SB)/4, $libc_lstat_trampoline<>(SB)
TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkdir(SB)
-
GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $4
DATA ·libc_mkdir_trampoline_addr(SB)/4, $libc_mkdir_trampoline<>(SB)
TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkdirat(SB)
-
GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $4
DATA ·libc_mkdirat_trampoline_addr(SB)/4, $libc_mkdirat_trampoline<>(SB)
TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkfifo(SB)
-
GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $4
DATA ·libc_mkfifo_trampoline_addr(SB)/4, $libc_mkfifo_trampoline<>(SB)
TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkfifoat(SB)
-
GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $4
DATA ·libc_mkfifoat_trampoline_addr(SB)/4, $libc_mkfifoat_trampoline<>(SB)
TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mknod(SB)
-
GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $4
DATA ·libc_mknod_trampoline_addr(SB)/4, $libc_mknod_trampoline<>(SB)
TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mknodat(SB)
-
GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4
DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB)
TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_nanosleep(SB)
-
GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4
DATA ·libc_nanosleep_trampoline_addr(SB)/4, $libc_nanosleep_trampoline<>(SB)
TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_open(SB)
-
GLOBL ·libc_open_trampoline_addr(SB), RODATA, $4
DATA ·libc_open_trampoline_addr(SB)/4, $libc_open_trampoline<>(SB)
TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_openat(SB)
-
GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $4
DATA ·libc_openat_trampoline_addr(SB)/4, $libc_openat_trampoline<>(SB)
TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pathconf(SB)
-
GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $4
DATA ·libc_pathconf_trampoline_addr(SB)/4, $libc_pathconf_trampoline<>(SB)
TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pread(SB)
-
GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $4
DATA ·libc_pread_trampoline_addr(SB)/4, $libc_pread_trampoline<>(SB)
TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pwrite(SB)
-
GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $4
DATA ·libc_pwrite_trampoline_addr(SB)/4, $libc_pwrite_trampoline<>(SB)
TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_read(SB)
-
GLOBL ·libc_read_trampoline_addr(SB), RODATA, $4
DATA ·libc_read_trampoline_addr(SB)/4, $libc_read_trampoline<>(SB)
TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_readlink(SB)
-
GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $4
DATA ·libc_readlink_trampoline_addr(SB)/4, $libc_readlink_trampoline<>(SB)
TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_readlinkat(SB)
-
GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $4
DATA ·libc_readlinkat_trampoline_addr(SB)/4, $libc_readlinkat_trampoline<>(SB)
TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_rename(SB)
-
GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $4
DATA ·libc_rename_trampoline_addr(SB)/4, $libc_rename_trampoline<>(SB)
TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_renameat(SB)
-
GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $4
DATA ·libc_renameat_trampoline_addr(SB)/4, $libc_renameat_trampoline<>(SB)
TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_revoke(SB)
-
GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $4
DATA ·libc_revoke_trampoline_addr(SB)/4, $libc_revoke_trampoline<>(SB)
TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_rmdir(SB)
-
GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $4
DATA ·libc_rmdir_trampoline_addr(SB)/4, $libc_rmdir_trampoline<>(SB)
TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_lseek(SB)
-
GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $4
DATA ·libc_lseek_trampoline_addr(SB)/4, $libc_lseek_trampoline<>(SB)
TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_select(SB)
-
GLOBL ·libc_select_trampoline_addr(SB), RODATA, $4
DATA ·libc_select_trampoline_addr(SB)/4, $libc_select_trampoline<>(SB)
TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setegid(SB)
-
GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setegid_trampoline_addr(SB)/4, $libc_setegid_trampoline<>(SB)
TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_seteuid(SB)
-
GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $4
DATA ·libc_seteuid_trampoline_addr(SB)/4, $libc_seteuid_trampoline<>(SB)
TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setgid(SB)
-
GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setgid_trampoline_addr(SB)/4, $libc_setgid_trampoline<>(SB)
TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setlogin(SB)
-
GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $4
DATA ·libc_setlogin_trampoline_addr(SB)/4, $libc_setlogin_trampoline<>(SB)
TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setpgid(SB)
-
GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setpgid_trampoline_addr(SB)/4, $libc_setpgid_trampoline<>(SB)
TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setpriority(SB)
-
GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $4
DATA ·libc_setpriority_trampoline_addr(SB)/4, $libc_setpriority_trampoline<>(SB)
TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setregid(SB)
-
GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setregid_trampoline_addr(SB)/4, $libc_setregid_trampoline<>(SB)
TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setreuid(SB)
-
GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setreuid_trampoline_addr(SB)/4, $libc_setreuid_trampoline<>(SB)
TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setresgid(SB)
-
GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setresgid_trampoline_addr(SB)/4, $libc_setresgid_trampoline<>(SB)
TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setresuid(SB)
-
GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB)
TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setrlimit(SB)
-
GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4
DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB)
TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setrtable(SB)
-
GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4
DATA ·libc_setrtable_trampoline_addr(SB)/4, $libc_setrtable_trampoline<>(SB)
TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setsid(SB)
-
GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setsid_trampoline_addr(SB)/4, $libc_setsid_trampoline<>(SB)
TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_settimeofday(SB)
-
GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $4
DATA ·libc_settimeofday_trampoline_addr(SB)/4, $libc_settimeofday_trampoline<>(SB)
TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setuid(SB)
-
GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setuid_trampoline_addr(SB)/4, $libc_setuid_trampoline<>(SB)
TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_stat(SB)
-
GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $4
DATA ·libc_stat_trampoline_addr(SB)/4, $libc_stat_trampoline<>(SB)
TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_statfs(SB)
-
GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $4
DATA ·libc_statfs_trampoline_addr(SB)/4, $libc_statfs_trampoline<>(SB)
TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_symlink(SB)
-
GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $4
DATA ·libc_symlink_trampoline_addr(SB)/4, $libc_symlink_trampoline<>(SB)
TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_symlinkat(SB)
-
GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $4
DATA ·libc_symlinkat_trampoline_addr(SB)/4, $libc_symlinkat_trampoline<>(SB)
TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sync(SB)
-
GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $4
DATA ·libc_sync_trampoline_addr(SB)/4, $libc_sync_trampoline<>(SB)
TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_truncate(SB)
-
GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $4
DATA ·libc_truncate_trampoline_addr(SB)/4, $libc_truncate_trampoline<>(SB)
TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_umask(SB)
-
GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $4
DATA ·libc_umask_trampoline_addr(SB)/4, $libc_umask_trampoline<>(SB)
TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_unlink(SB)
-
GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $4
DATA ·libc_unlink_trampoline_addr(SB)/4, $libc_unlink_trampoline<>(SB)
TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_unlinkat(SB)
-
GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $4
DATA ·libc_unlinkat_trampoline_addr(SB)/4, $libc_unlinkat_trampoline<>(SB)
TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_unmount(SB)
-
GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $4
DATA ·libc_unmount_trampoline_addr(SB)/4, $libc_unmount_trampoline<>(SB)
TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_write(SB)
-
GLOBL ·libc_write_trampoline_addr(SB), RODATA, $4
DATA ·libc_write_trampoline_addr(SB)/4, $libc_write_trampoline<>(SB)
TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mmap(SB)
-
GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $4
DATA ·libc_mmap_trampoline_addr(SB)/4, $libc_mmap_trampoline<>(SB)
TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_munmap(SB)
-
GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4
DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB)
TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_utimensat(SB)
-
GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4
DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB)
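Context for the regenerated zsyscall_openbsd_*.s hunks above and below: the updated code generator drops the blank line that used to separate each trampoline's JMP from its GLOBL/DATA pair, so nearly every stanza shows a single deleted blank line with no functional change. The one substantive addition per file is the libc_clock_gettime trampoline, and the GLOBL/DATA operand size follows the platform pointer width: $4 on 32-bit targets such as 386 and arm (the file above), $8 on 64-bit targets such as amd64 (the file below).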
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
index 98446d2b9540..a05e5f4fff6d 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
@@ -696,6 +696,20 @@ var libc_chroot_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockGettime(clockid int32, time *Timespec) (err error) {
+ _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_clock_gettime_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0)
if e1 != 0 {
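The Go side of the addition is the ClockGettime wrapper above, which dispatches through the dynamically imported clock_gettime symbol via its trampoline address. A minimal usage sketch, assuming the vendored golang.org/x/sys/unix at this revision and an OpenBSD build target (CLOCK_MONOTONIC and the Timespec field layout are taken from that package):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var ts unix.Timespec
	// ClockGettime fills ts through the libc clock_gettime trampoline
	// added in this diff; on failure it returns the mapped errno.
	if err := unix.ClockGettime(unix.CLOCK_MONOTONIC, &ts); err != nil {
		fmt.Println("clock_gettime:", err)
		return
	}
	fmt.Printf("monotonic time: %d.%09d s\n", ts.Sec, ts.Nsec)
}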
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s
index 243a6663ce67..5782cd108447 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s
@@ -5,792 +5,665 @@
TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getgroups(SB)
-
GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8
DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB)
TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setgroups(SB)
-
GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8
DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB)
TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_wait4(SB)
-
GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8
DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB)
TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_accept(SB)
-
GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8
DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB)
TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_bind(SB)
-
GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8
DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB)
TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_connect(SB)
-
GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8
DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB)
TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_socket(SB)
-
GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8
DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB)
TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getsockopt(SB)
-
GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8
DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB)
TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setsockopt(SB)
-
GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8
DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB)
TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpeername(SB)
-
GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8
DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB)
TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getsockname(SB)
-
GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8
DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB)
TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_shutdown(SB)
-
GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8
DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB)
TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_socketpair(SB)
-
GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8
DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB)
TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_recvfrom(SB)
-
GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8
DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB)
TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendto(SB)
-
GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8
DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB)
TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_recvmsg(SB)
-
GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8
DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB)
TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendmsg(SB)
-
GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8
DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB)
TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_kevent(SB)
-
GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8
DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB)
TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_utimes(SB)
-
GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8
DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB)
TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_futimes(SB)
-
GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8
DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB)
TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_poll(SB)
-
GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8
DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB)
TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_madvise(SB)
-
GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8
DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB)
TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mlock(SB)
-
GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8
DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB)
TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mlockall(SB)
-
GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8
DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB)
TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mprotect(SB)
-
GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8
DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB)
TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_msync(SB)
-
GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8
DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB)
TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_munlock(SB)
-
GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8
DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB)
TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_munlockall(SB)
-
GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8
DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB)
TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pipe2(SB)
-
GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8
DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB)
TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getdents(SB)
-
GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8
DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB)
TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getcwd(SB)
-
GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8
DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB)
TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ioctl(SB)
-
GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8
DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB)
TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sysctl(SB)
-
GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8
DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ppoll(SB)
-
GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8
DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB)
TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_access(SB)
-
GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8
DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB)
TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_adjtime(SB)
-
GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8
DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB)
TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chdir(SB)
-
GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8
DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB)
TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chflags(SB)
-
GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8
DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB)
TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chmod(SB)
-
GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8
DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB)
TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chown(SB)
-
GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8
DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB)
TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chroot(SB)
-
GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8
DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB)
+TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_clock_gettime(SB)
+GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8
+DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB)
+
TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_close(SB)
-
GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8
DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB)
TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_dup(SB)
-
GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8
DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB)
TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_dup2(SB)
-
GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8
DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB)
TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_dup3(SB)
-
GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8
DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB)
TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_exit(SB)
-
GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8
DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB)
TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_faccessat(SB)
-
GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8
DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB)
TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchdir(SB)
-
GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB)
TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchflags(SB)
-
GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB)
TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchmod(SB)
-
GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB)
TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchmodat(SB)
-
GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB)
TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchown(SB)
-
GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB)
TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchownat(SB)
-
GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB)
TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_flock(SB)
-
GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8
DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB)
TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fpathconf(SB)
-
GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8
DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB)
TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstat(SB)
-
GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8
DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB)
TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstatat(SB)
-
GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8
DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB)
TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstatfs(SB)
-
GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8
DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB)
TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fsync(SB)
-
GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8
DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB)
TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ftruncate(SB)
-
GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8
DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB)
TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getegid(SB)
-
GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB)
TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_geteuid(SB)
-
GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB)
TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getgid(SB)
-
GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB)
TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpgid(SB)
-
GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB)
TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpgrp(SB)
-
GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8
DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB)
TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpid(SB)
-
GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB)
TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getppid(SB)
-
GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB)
TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpriority(SB)
-
GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8
DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB)
TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getrlimit(SB)
-
GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8
DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB)
TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getrtable(SB)
-
GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8
DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB)
TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getrusage(SB)
-
GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8
DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB)
TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getsid(SB)
-
GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB)
TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_gettimeofday(SB)
-
GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8
DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB)
TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getuid(SB)
-
GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB)
TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_issetugid(SB)
-
GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8
DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB)
TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_kill(SB)
-
GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8
DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB)
TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_kqueue(SB)
-
GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8
DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB)
TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_lchown(SB)
-
GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8
DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB)
TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_link(SB)
-
GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8
DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB)
TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_linkat(SB)
-
GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8
DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB)
TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_listen(SB)
-
GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8
DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB)
TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_lstat(SB)
-
GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8
DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB)
TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkdir(SB)
-
GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8
DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB)
TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkdirat(SB)
-
GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8
DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB)
TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkfifo(SB)
-
GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8
DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB)
TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkfifoat(SB)
-
GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8
DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB)
TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mknod(SB)
-
GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8
DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB)
TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mknodat(SB)
-
GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8
DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB)
TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_nanosleep(SB)
-
GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8
DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB)
TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_open(SB)
-
GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8
DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB)
TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_openat(SB)
-
GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8
DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB)
TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pathconf(SB)
-
GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8
DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB)
TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pread(SB)
-
GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8
DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB)
TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pwrite(SB)
-
GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8
DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB)
TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_read(SB)
-
GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8
DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB)
TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_readlink(SB)
-
GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8
DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB)
TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_readlinkat(SB)
-
GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8
DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB)
TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_rename(SB)
-
GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8
DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB)
TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_renameat(SB)
-
GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8
DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB)
TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_revoke(SB)
-
GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8
DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB)
TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_rmdir(SB)
-
GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8
DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB)
TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_lseek(SB)
-
GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8
DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB)
TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_select(SB)
-
GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8
DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB)
TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setegid(SB)
-
GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB)
TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_seteuid(SB)
-
GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB)
TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setgid(SB)
-
GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB)
TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setlogin(SB)
-
GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8
DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB)
TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setpgid(SB)
-
GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB)
TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setpriority(SB)
-
GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8
DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB)
TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setregid(SB)
-
GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB)
TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setreuid(SB)
-
GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB)
TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setresgid(SB)
-
GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB)
TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setresuid(SB)
-
GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB)
TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setrlimit(SB)
-
GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8
DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB)
TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setrtable(SB)
-
GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8
DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB)
TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setsid(SB)
-
GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB)
TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_settimeofday(SB)
-
GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8
DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB)
TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setuid(SB)
-
GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB)
TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_stat(SB)
-
GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8
DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB)
TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_statfs(SB)
-
GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8
DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB)
TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_symlink(SB)
-
GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8
DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB)
TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_symlinkat(SB)
-
GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8
DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB)
TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sync(SB)
-
GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8
DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB)
TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_truncate(SB)
-
GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8
DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB)
TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_umask(SB)
-
GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8
DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB)
TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_unlink(SB)
-
GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8
DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB)
TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_unlinkat(SB)
-
GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8
DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB)
TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_unmount(SB)
-
GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8
DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB)
TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_write(SB)
-
GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8
DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB)
TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mmap(SB)
-
GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8
DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB)
TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_munmap(SB)
-
GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8
DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB)
TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_utimensat(SB)
-
GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8
DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB)
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
index 8da6791d1e33..b2da8e50cc7a 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
@@ -696,6 +696,20 @@ var libc_chroot_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockGettime(clockid int32, time *Timespec) (err error) {
+ _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_clock_gettime_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0)
if e1 != 0 {
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s
index 9ad116d9fbdd..cf310420c942 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s
@@ -5,792 +5,665 @@
TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getgroups(SB)
-
GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $4
DATA ·libc_getgroups_trampoline_addr(SB)/4, $libc_getgroups_trampoline<>(SB)
TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setgroups(SB)
-
GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $4
DATA ·libc_setgroups_trampoline_addr(SB)/4, $libc_setgroups_trampoline<>(SB)
TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_wait4(SB)
-
GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $4
DATA ·libc_wait4_trampoline_addr(SB)/4, $libc_wait4_trampoline<>(SB)
TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_accept(SB)
-
GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $4
DATA ·libc_accept_trampoline_addr(SB)/4, $libc_accept_trampoline<>(SB)
TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_bind(SB)
-
GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $4
DATA ·libc_bind_trampoline_addr(SB)/4, $libc_bind_trampoline<>(SB)
TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_connect(SB)
-
GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $4
DATA ·libc_connect_trampoline_addr(SB)/4, $libc_connect_trampoline<>(SB)
TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_socket(SB)
-
GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $4
DATA ·libc_socket_trampoline_addr(SB)/4, $libc_socket_trampoline<>(SB)
TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getsockopt(SB)
-
GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $4
DATA ·libc_getsockopt_trampoline_addr(SB)/4, $libc_getsockopt_trampoline<>(SB)
TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setsockopt(SB)
-
GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $4
DATA ·libc_setsockopt_trampoline_addr(SB)/4, $libc_setsockopt_trampoline<>(SB)
TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpeername(SB)
-
GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $4
DATA ·libc_getpeername_trampoline_addr(SB)/4, $libc_getpeername_trampoline<>(SB)
TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getsockname(SB)
-
GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $4
DATA ·libc_getsockname_trampoline_addr(SB)/4, $libc_getsockname_trampoline<>(SB)
TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_shutdown(SB)
-
GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $4
DATA ·libc_shutdown_trampoline_addr(SB)/4, $libc_shutdown_trampoline<>(SB)
TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_socketpair(SB)
-
GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $4
DATA ·libc_socketpair_trampoline_addr(SB)/4, $libc_socketpair_trampoline<>(SB)
TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_recvfrom(SB)
-
GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $4
DATA ·libc_recvfrom_trampoline_addr(SB)/4, $libc_recvfrom_trampoline<>(SB)
TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendto(SB)
-
GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $4
DATA ·libc_sendto_trampoline_addr(SB)/4, $libc_sendto_trampoline<>(SB)
TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_recvmsg(SB)
-
GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $4
DATA ·libc_recvmsg_trampoline_addr(SB)/4, $libc_recvmsg_trampoline<>(SB)
TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendmsg(SB)
-
GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $4
DATA ·libc_sendmsg_trampoline_addr(SB)/4, $libc_sendmsg_trampoline<>(SB)
TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_kevent(SB)
-
GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $4
DATA ·libc_kevent_trampoline_addr(SB)/4, $libc_kevent_trampoline<>(SB)
TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_utimes(SB)
-
GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $4
DATA ·libc_utimes_trampoline_addr(SB)/4, $libc_utimes_trampoline<>(SB)
TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_futimes(SB)
-
GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $4
DATA ·libc_futimes_trampoline_addr(SB)/4, $libc_futimes_trampoline<>(SB)
TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_poll(SB)
-
GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $4
DATA ·libc_poll_trampoline_addr(SB)/4, $libc_poll_trampoline<>(SB)
TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_madvise(SB)
-
GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $4
DATA ·libc_madvise_trampoline_addr(SB)/4, $libc_madvise_trampoline<>(SB)
TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mlock(SB)
-
GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $4
DATA ·libc_mlock_trampoline_addr(SB)/4, $libc_mlock_trampoline<>(SB)
TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mlockall(SB)
-
GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $4
DATA ·libc_mlockall_trampoline_addr(SB)/4, $libc_mlockall_trampoline<>(SB)
TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mprotect(SB)
-
GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $4
DATA ·libc_mprotect_trampoline_addr(SB)/4, $libc_mprotect_trampoline<>(SB)
TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_msync(SB)
-
GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $4
DATA ·libc_msync_trampoline_addr(SB)/4, $libc_msync_trampoline<>(SB)
TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_munlock(SB)
-
GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $4
DATA ·libc_munlock_trampoline_addr(SB)/4, $libc_munlock_trampoline<>(SB)
TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_munlockall(SB)
-
GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $4
DATA ·libc_munlockall_trampoline_addr(SB)/4, $libc_munlockall_trampoline<>(SB)
TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pipe2(SB)
-
GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $4
DATA ·libc_pipe2_trampoline_addr(SB)/4, $libc_pipe2_trampoline<>(SB)
TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getdents(SB)
-
GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $4
DATA ·libc_getdents_trampoline_addr(SB)/4, $libc_getdents_trampoline<>(SB)
TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getcwd(SB)
-
GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4
DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB)
TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ioctl(SB)
-
GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $4
DATA ·libc_ioctl_trampoline_addr(SB)/4, $libc_ioctl_trampoline<>(SB)
TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sysctl(SB)
-
GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4
DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB)
TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ppoll(SB)
-
GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4
DATA ·libc_ppoll_trampoline_addr(SB)/4, $libc_ppoll_trampoline<>(SB)
TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_access(SB)
-
GLOBL ·libc_access_trampoline_addr(SB), RODATA, $4
DATA ·libc_access_trampoline_addr(SB)/4, $libc_access_trampoline<>(SB)
TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_adjtime(SB)
-
GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $4
DATA ·libc_adjtime_trampoline_addr(SB)/4, $libc_adjtime_trampoline<>(SB)
TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chdir(SB)
-
GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $4
DATA ·libc_chdir_trampoline_addr(SB)/4, $libc_chdir_trampoline<>(SB)
TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chflags(SB)
-
GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $4
DATA ·libc_chflags_trampoline_addr(SB)/4, $libc_chflags_trampoline<>(SB)
TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chmod(SB)
-
GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $4
DATA ·libc_chmod_trampoline_addr(SB)/4, $libc_chmod_trampoline<>(SB)
TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chown(SB)
-
GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $4
DATA ·libc_chown_trampoline_addr(SB)/4, $libc_chown_trampoline<>(SB)
TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chroot(SB)
-
GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $4
DATA ·libc_chroot_trampoline_addr(SB)/4, $libc_chroot_trampoline<>(SB)
+TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_clock_gettime(SB)
+GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $4
+DATA ·libc_clock_gettime_trampoline_addr(SB)/4, $libc_clock_gettime_trampoline<>(SB)
+
TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_close(SB)
-
GLOBL ·libc_close_trampoline_addr(SB), RODATA, $4
DATA ·libc_close_trampoline_addr(SB)/4, $libc_close_trampoline<>(SB)
TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_dup(SB)
-
GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $4
DATA ·libc_dup_trampoline_addr(SB)/4, $libc_dup_trampoline<>(SB)
TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_dup2(SB)
-
GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $4
DATA ·libc_dup2_trampoline_addr(SB)/4, $libc_dup2_trampoline<>(SB)
TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_dup3(SB)
-
GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $4
DATA ·libc_dup3_trampoline_addr(SB)/4, $libc_dup3_trampoline<>(SB)
TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_exit(SB)
-
GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $4
DATA ·libc_exit_trampoline_addr(SB)/4, $libc_exit_trampoline<>(SB)
TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_faccessat(SB)
-
GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $4
DATA ·libc_faccessat_trampoline_addr(SB)/4, $libc_faccessat_trampoline<>(SB)
TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchdir(SB)
-
GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $4
DATA ·libc_fchdir_trampoline_addr(SB)/4, $libc_fchdir_trampoline<>(SB)
TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchflags(SB)
-
GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $4
DATA ·libc_fchflags_trampoline_addr(SB)/4, $libc_fchflags_trampoline<>(SB)
TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchmod(SB)
-
GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $4
DATA ·libc_fchmod_trampoline_addr(SB)/4, $libc_fchmod_trampoline<>(SB)
TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchmodat(SB)
-
GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $4
DATA ·libc_fchmodat_trampoline_addr(SB)/4, $libc_fchmodat_trampoline<>(SB)
TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchown(SB)
-
GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $4
DATA ·libc_fchown_trampoline_addr(SB)/4, $libc_fchown_trampoline<>(SB)
TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchownat(SB)
-
GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $4
DATA ·libc_fchownat_trampoline_addr(SB)/4, $libc_fchownat_trampoline<>(SB)
TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_flock(SB)
-
GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $4
DATA ·libc_flock_trampoline_addr(SB)/4, $libc_flock_trampoline<>(SB)
TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fpathconf(SB)
-
GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $4
DATA ·libc_fpathconf_trampoline_addr(SB)/4, $libc_fpathconf_trampoline<>(SB)
TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstat(SB)
-
GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $4
DATA ·libc_fstat_trampoline_addr(SB)/4, $libc_fstat_trampoline<>(SB)
TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstatat(SB)
-
GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $4
DATA ·libc_fstatat_trampoline_addr(SB)/4, $libc_fstatat_trampoline<>(SB)
TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstatfs(SB)
-
GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $4
DATA ·libc_fstatfs_trampoline_addr(SB)/4, $libc_fstatfs_trampoline<>(SB)
TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fsync(SB)
-
GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $4
DATA ·libc_fsync_trampoline_addr(SB)/4, $libc_fsync_trampoline<>(SB)
TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ftruncate(SB)
-
GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $4
DATA ·libc_ftruncate_trampoline_addr(SB)/4, $libc_ftruncate_trampoline<>(SB)
TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getegid(SB)
-
GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $4
DATA ·libc_getegid_trampoline_addr(SB)/4, $libc_getegid_trampoline<>(SB)
TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_geteuid(SB)
-
GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $4
DATA ·libc_geteuid_trampoline_addr(SB)/4, $libc_geteuid_trampoline<>(SB)
TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getgid(SB)
-
GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $4
DATA ·libc_getgid_trampoline_addr(SB)/4, $libc_getgid_trampoline<>(SB)
TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpgid(SB)
-
GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $4
DATA ·libc_getpgid_trampoline_addr(SB)/4, $libc_getpgid_trampoline<>(SB)
TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpgrp(SB)
-
GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $4
DATA ·libc_getpgrp_trampoline_addr(SB)/4, $libc_getpgrp_trampoline<>(SB)
TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpid(SB)
-
GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $4
DATA ·libc_getpid_trampoline_addr(SB)/4, $libc_getpid_trampoline<>(SB)
TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getppid(SB)
-
GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $4
DATA ·libc_getppid_trampoline_addr(SB)/4, $libc_getppid_trampoline<>(SB)
TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpriority(SB)
-
GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $4
DATA ·libc_getpriority_trampoline_addr(SB)/4, $libc_getpriority_trampoline<>(SB)
TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getrlimit(SB)
-
GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $4
DATA ·libc_getrlimit_trampoline_addr(SB)/4, $libc_getrlimit_trampoline<>(SB)
TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getrtable(SB)
-
GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $4
DATA ·libc_getrtable_trampoline_addr(SB)/4, $libc_getrtable_trampoline<>(SB)
TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getrusage(SB)
-
GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $4
DATA ·libc_getrusage_trampoline_addr(SB)/4, $libc_getrusage_trampoline<>(SB)
TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getsid(SB)
-
GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $4
DATA ·libc_getsid_trampoline_addr(SB)/4, $libc_getsid_trampoline<>(SB)
TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_gettimeofday(SB)
-
GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $4
DATA ·libc_gettimeofday_trampoline_addr(SB)/4, $libc_gettimeofday_trampoline<>(SB)
TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getuid(SB)
-
GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $4
DATA ·libc_getuid_trampoline_addr(SB)/4, $libc_getuid_trampoline<>(SB)
TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_issetugid(SB)
-
GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $4
DATA ·libc_issetugid_trampoline_addr(SB)/4, $libc_issetugid_trampoline<>(SB)
TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_kill(SB)
-
GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $4
DATA ·libc_kill_trampoline_addr(SB)/4, $libc_kill_trampoline<>(SB)
TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_kqueue(SB)
-
GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $4
DATA ·libc_kqueue_trampoline_addr(SB)/4, $libc_kqueue_trampoline<>(SB)
TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_lchown(SB)
-
GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $4
DATA ·libc_lchown_trampoline_addr(SB)/4, $libc_lchown_trampoline<>(SB)
TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_link(SB)
-
GLOBL ·libc_link_trampoline_addr(SB), RODATA, $4
DATA ·libc_link_trampoline_addr(SB)/4, $libc_link_trampoline<>(SB)
TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_linkat(SB)
-
GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $4
DATA ·libc_linkat_trampoline_addr(SB)/4, $libc_linkat_trampoline<>(SB)
TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_listen(SB)
-
GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $4
DATA ·libc_listen_trampoline_addr(SB)/4, $libc_listen_trampoline<>(SB)
TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_lstat(SB)
-
GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $4
DATA ·libc_lstat_trampoline_addr(SB)/4, $libc_lstat_trampoline<>(SB)
TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkdir(SB)
-
GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $4
DATA ·libc_mkdir_trampoline_addr(SB)/4, $libc_mkdir_trampoline<>(SB)
TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkdirat(SB)
-
GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $4
DATA ·libc_mkdirat_trampoline_addr(SB)/4, $libc_mkdirat_trampoline<>(SB)
TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkfifo(SB)
-
GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $4
DATA ·libc_mkfifo_trampoline_addr(SB)/4, $libc_mkfifo_trampoline<>(SB)
TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkfifoat(SB)
-
GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $4
DATA ·libc_mkfifoat_trampoline_addr(SB)/4, $libc_mkfifoat_trampoline<>(SB)
TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mknod(SB)
-
GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $4
DATA ·libc_mknod_trampoline_addr(SB)/4, $libc_mknod_trampoline<>(SB)
TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mknodat(SB)
-
GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4
DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB)
TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_nanosleep(SB)
-
GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4
DATA ·libc_nanosleep_trampoline_addr(SB)/4, $libc_nanosleep_trampoline<>(SB)
TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_open(SB)
-
GLOBL ·libc_open_trampoline_addr(SB), RODATA, $4
DATA ·libc_open_trampoline_addr(SB)/4, $libc_open_trampoline<>(SB)
TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_openat(SB)
-
GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $4
DATA ·libc_openat_trampoline_addr(SB)/4, $libc_openat_trampoline<>(SB)
TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pathconf(SB)
-
GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $4
DATA ·libc_pathconf_trampoline_addr(SB)/4, $libc_pathconf_trampoline<>(SB)
TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pread(SB)
-
GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $4
DATA ·libc_pread_trampoline_addr(SB)/4, $libc_pread_trampoline<>(SB)
TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pwrite(SB)
-
GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $4
DATA ·libc_pwrite_trampoline_addr(SB)/4, $libc_pwrite_trampoline<>(SB)
TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_read(SB)
-
GLOBL ·libc_read_trampoline_addr(SB), RODATA, $4
DATA ·libc_read_trampoline_addr(SB)/4, $libc_read_trampoline<>(SB)
TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_readlink(SB)
-
GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $4
DATA ·libc_readlink_trampoline_addr(SB)/4, $libc_readlink_trampoline<>(SB)
TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_readlinkat(SB)
-
GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $4
DATA ·libc_readlinkat_trampoline_addr(SB)/4, $libc_readlinkat_trampoline<>(SB)
TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_rename(SB)
-
GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $4
DATA ·libc_rename_trampoline_addr(SB)/4, $libc_rename_trampoline<>(SB)
TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_renameat(SB)
-
GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $4
DATA ·libc_renameat_trampoline_addr(SB)/4, $libc_renameat_trampoline<>(SB)
TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_revoke(SB)
-
GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $4
DATA ·libc_revoke_trampoline_addr(SB)/4, $libc_revoke_trampoline<>(SB)
TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_rmdir(SB)
-
GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $4
DATA ·libc_rmdir_trampoline_addr(SB)/4, $libc_rmdir_trampoline<>(SB)
TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_lseek(SB)
-
GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $4
DATA ·libc_lseek_trampoline_addr(SB)/4, $libc_lseek_trampoline<>(SB)
TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_select(SB)
-
GLOBL ·libc_select_trampoline_addr(SB), RODATA, $4
DATA ·libc_select_trampoline_addr(SB)/4, $libc_select_trampoline<>(SB)
TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setegid(SB)
-
GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setegid_trampoline_addr(SB)/4, $libc_setegid_trampoline<>(SB)
TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_seteuid(SB)
-
GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $4
DATA ·libc_seteuid_trampoline_addr(SB)/4, $libc_seteuid_trampoline<>(SB)
TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setgid(SB)
-
GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setgid_trampoline_addr(SB)/4, $libc_setgid_trampoline<>(SB)
TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setlogin(SB)
-
GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $4
DATA ·libc_setlogin_trampoline_addr(SB)/4, $libc_setlogin_trampoline<>(SB)
TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setpgid(SB)
-
GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setpgid_trampoline_addr(SB)/4, $libc_setpgid_trampoline<>(SB)
TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setpriority(SB)
-
GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $4
DATA ·libc_setpriority_trampoline_addr(SB)/4, $libc_setpriority_trampoline<>(SB)
TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setregid(SB)
-
GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setregid_trampoline_addr(SB)/4, $libc_setregid_trampoline<>(SB)
TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setreuid(SB)
-
GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setreuid_trampoline_addr(SB)/4, $libc_setreuid_trampoline<>(SB)
TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setresgid(SB)
-
GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setresgid_trampoline_addr(SB)/4, $libc_setresgid_trampoline<>(SB)
TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setresuid(SB)
-
GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB)
TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setrlimit(SB)
-
GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4
DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB)
TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setrtable(SB)
-
GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4
DATA ·libc_setrtable_trampoline_addr(SB)/4, $libc_setrtable_trampoline<>(SB)
TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setsid(SB)
-
GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setsid_trampoline_addr(SB)/4, $libc_setsid_trampoline<>(SB)
TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_settimeofday(SB)
-
GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $4
DATA ·libc_settimeofday_trampoline_addr(SB)/4, $libc_settimeofday_trampoline<>(SB)
TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setuid(SB)
-
GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setuid_trampoline_addr(SB)/4, $libc_setuid_trampoline<>(SB)
TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_stat(SB)
-
GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $4
DATA ·libc_stat_trampoline_addr(SB)/4, $libc_stat_trampoline<>(SB)
TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_statfs(SB)
-
GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $4
DATA ·libc_statfs_trampoline_addr(SB)/4, $libc_statfs_trampoline<>(SB)
TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_symlink(SB)
-
GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $4
DATA ·libc_symlink_trampoline_addr(SB)/4, $libc_symlink_trampoline<>(SB)
TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_symlinkat(SB)
-
GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $4
DATA ·libc_symlinkat_trampoline_addr(SB)/4, $libc_symlinkat_trampoline<>(SB)
TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sync(SB)
-
GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $4
DATA ·libc_sync_trampoline_addr(SB)/4, $libc_sync_trampoline<>(SB)
TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_truncate(SB)
-
GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $4
DATA ·libc_truncate_trampoline_addr(SB)/4, $libc_truncate_trampoline<>(SB)
TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_umask(SB)
-
GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $4
DATA ·libc_umask_trampoline_addr(SB)/4, $libc_umask_trampoline<>(SB)
TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_unlink(SB)
-
GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $4
DATA ·libc_unlink_trampoline_addr(SB)/4, $libc_unlink_trampoline<>(SB)
TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_unlinkat(SB)
-
GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $4
DATA ·libc_unlinkat_trampoline_addr(SB)/4, $libc_unlinkat_trampoline<>(SB)
TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_unmount(SB)
-
GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $4
DATA ·libc_unmount_trampoline_addr(SB)/4, $libc_unmount_trampoline<>(SB)
TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_write(SB)
-
GLOBL ·libc_write_trampoline_addr(SB), RODATA, $4
DATA ·libc_write_trampoline_addr(SB)/4, $libc_write_trampoline<>(SB)
TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mmap(SB)
-
GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $4
DATA ·libc_mmap_trampoline_addr(SB)/4, $libc_mmap_trampoline<>(SB)
TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_munmap(SB)
-
GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4
DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB)
TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_utimensat(SB)
-
GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4
DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB)
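Every stub in the regenerated .s file above follows the same three-line pattern: a TEXT trampoline that simply JMPs to the dynamically imported libc symbol, plus a GLOBL/DATA pair that publishes the trampoline's address to Go code ($4 wide on 32-bit targets such as this file, $8 on the 64-bit files that follow). The Go half of the pattern lives in the matching zsyscall_openbsd_*.go files; a minimal sketch of it, assuming the package's usual linkname plumbing and using getpid purely as an illustration:

//go:build openbsd

package unix // sketch only; mirrors the vendored golang.org/x/sys/unix layout

import (
	"syscall"
	_ "unsafe" // for go:linkname
)

// Borrowed from the standard library via linkname, as the vendored
// syscall_openbsd_libc.go does; an (empty) .s file in the package lets
// the compiler accept the bodyless declaration.
//
//go:linkname syscall_rawSyscall syscall.rawSyscall
func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno)

// Initialized by the DATA directive in the .s file to the address of
// libc_getpid_trampoline<>, which JMPs to the real getpid in libc.so.
var libc_getpid_trampoline_addr uintptr

//go:cgo_import_dynamic libc_getpid getpid "libc.so"

// Getpid calls libc's getpid through the trampoline instead of issuing
// a raw SYS_GETPID trap.
func Getpid() (pid int) {
	r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0)
	return int(r0)
}
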
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
index 800aab6e3e79..048b2655e6f8 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
@@ -696,6 +696,20 @@ var libc_chroot_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockGettime(clockid int32, time *Timespec) (err error) {
+ _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_clock_gettime_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0)
if e1 != 0 {
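With the wrapper above and its trampoline in place, ClockGettime becomes callable on OpenBSD just as on the other BSDs. A minimal usage sketch (not part of this diff), e.g. on an openbsd/arm64 host:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var ts unix.Timespec
	// CLOCK_MONOTONIC comes from the package's generated OpenBSD constants.
	if err := unix.ClockGettime(unix.CLOCK_MONOTONIC, &ts); err != nil {
		fmt.Println("clock_gettime:", err)
		return
	}
	fmt.Printf("monotonic: %d.%09d s\n", ts.Sec, ts.Nsec)
}
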
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s
index 4efeff9abbf4..484bb42e0a89 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s
@@ -5,792 +5,665 @@
TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getgroups(SB)
-
GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8
DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB)
TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setgroups(SB)
-
GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8
DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB)
TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_wait4(SB)
-
GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8
DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB)
TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_accept(SB)
-
GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8
DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB)
TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_bind(SB)
-
GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8
DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB)
TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_connect(SB)
-
GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8
DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB)
TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_socket(SB)
-
GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8
DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB)
TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getsockopt(SB)
-
GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8
DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB)
TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setsockopt(SB)
-
GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8
DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB)
TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpeername(SB)
-
GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8
DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB)
TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getsockname(SB)
-
GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8
DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB)
TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_shutdown(SB)
-
GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8
DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB)
TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_socketpair(SB)
-
GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8
DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB)
TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_recvfrom(SB)
-
GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8
DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB)
TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendto(SB)
-
GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8
DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB)
TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_recvmsg(SB)
-
GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8
DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB)
TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendmsg(SB)
-
GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8
DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB)
TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_kevent(SB)
-
GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8
DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB)
TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_utimes(SB)
-
GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8
DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB)
TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_futimes(SB)
-
GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8
DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB)
TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_poll(SB)
-
GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8
DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB)
TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_madvise(SB)
-
GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8
DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB)
TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mlock(SB)
-
GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8
DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB)
TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mlockall(SB)
-
GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8
DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB)
TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mprotect(SB)
-
GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8
DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB)
TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_msync(SB)
-
GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8
DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB)
TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_munlock(SB)
-
GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8
DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB)
TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_munlockall(SB)
-
GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8
DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB)
TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pipe2(SB)
-
GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8
DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB)
TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getdents(SB)
-
GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8
DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB)
TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getcwd(SB)
-
GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8
DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB)
TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ioctl(SB)
-
GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8
DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB)
TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sysctl(SB)
-
GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8
DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ppoll(SB)
-
GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8
DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB)
TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_access(SB)
-
GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8
DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB)
TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_adjtime(SB)
-
GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8
DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB)
TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chdir(SB)
-
GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8
DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB)
TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chflags(SB)
-
GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8
DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB)
TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chmod(SB)
-
GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8
DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB)
TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chown(SB)
-
GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8
DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB)
TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chroot(SB)
-
GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8
DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB)
+TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_clock_gettime(SB)
+GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8
+DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB)
+
TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_close(SB)
-
GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8
DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB)
TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_dup(SB)
-
GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8
DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB)
TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_dup2(SB)
-
GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8
DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB)
TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_dup3(SB)
-
GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8
DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB)
TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_exit(SB)
-
GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8
DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB)
TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_faccessat(SB)
-
GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8
DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB)
TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchdir(SB)
-
GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB)
TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchflags(SB)
-
GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB)
TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchmod(SB)
-
GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB)
TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchmodat(SB)
-
GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB)
TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchown(SB)
-
GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB)
TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchownat(SB)
-
GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB)
TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_flock(SB)
-
GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8
DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB)
TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fpathconf(SB)
-
GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8
DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB)
TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstat(SB)
-
GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8
DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB)
TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstatat(SB)
-
GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8
DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB)
TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstatfs(SB)
-
GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8
DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB)
TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fsync(SB)
-
GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8
DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB)
TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ftruncate(SB)
-
GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8
DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB)
TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getegid(SB)
-
GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB)
TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_geteuid(SB)
-
GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB)
TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getgid(SB)
-
GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB)
TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpgid(SB)
-
GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB)
TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpgrp(SB)
-
GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8
DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB)
TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpid(SB)
-
GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB)
TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getppid(SB)
-
GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB)
TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpriority(SB)
-
GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8
DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB)
TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getrlimit(SB)
-
GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8
DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB)
TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getrtable(SB)
-
GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8
DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB)
TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getrusage(SB)
-
GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8
DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB)
TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getsid(SB)
-
GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB)
TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_gettimeofday(SB)
-
GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8
DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB)
TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getuid(SB)
-
GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB)
TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_issetugid(SB)
-
GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8
DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB)
TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_kill(SB)
-
GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8
DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB)
TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_kqueue(SB)
-
GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8
DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB)
TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_lchown(SB)
-
GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8
DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB)
TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_link(SB)
-
GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8
DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB)
TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_linkat(SB)
-
GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8
DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB)
TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_listen(SB)
-
GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8
DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB)
TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_lstat(SB)
-
GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8
DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB)
TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkdir(SB)
-
GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8
DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB)
TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkdirat(SB)
-
GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8
DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB)
TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkfifo(SB)
-
GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8
DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB)
TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkfifoat(SB)
-
GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8
DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB)
TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mknod(SB)
-
GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8
DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB)
TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mknodat(SB)
-
GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8
DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB)
TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_nanosleep(SB)
-
GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8
DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB)
TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_open(SB)
-
GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8
DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB)
TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_openat(SB)
-
GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8
DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB)
TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pathconf(SB)
-
GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8
DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB)
TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pread(SB)
-
GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8
DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB)
TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pwrite(SB)
-
GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8
DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB)
TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_read(SB)
-
GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8
DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB)
TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_readlink(SB)
-
GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8
DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB)
TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_readlinkat(SB)
-
GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8
DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB)
TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_rename(SB)
-
GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8
DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB)
TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_renameat(SB)
-
GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8
DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB)
TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_revoke(SB)
-
GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8
DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB)
TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_rmdir(SB)
-
GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8
DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB)
TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_lseek(SB)
-
GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8
DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB)
TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_select(SB)
-
GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8
DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB)
TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setegid(SB)
-
GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB)
TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_seteuid(SB)
-
GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB)
TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setgid(SB)
-
GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB)
TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setlogin(SB)
-
GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8
DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB)
TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setpgid(SB)
-
GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB)
TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setpriority(SB)
-
GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8
DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB)
TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setregid(SB)
-
GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB)
TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setreuid(SB)
-
GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB)
TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setresgid(SB)
-
GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB)
TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setresuid(SB)
-
GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB)
TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setrlimit(SB)
-
GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8
DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB)
TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setrtable(SB)
-
GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8
DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB)
TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setsid(SB)
-
GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB)
TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_settimeofday(SB)
-
GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8
DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB)
TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setuid(SB)
-
GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB)
TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_stat(SB)
-
GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8
DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB)
TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_statfs(SB)
-
GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8
DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB)
TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_symlink(SB)
-
GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8
DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB)
TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_symlinkat(SB)
-
GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8
DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB)
TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sync(SB)
-
GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8
DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB)
TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_truncate(SB)
-
GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8
DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB)
TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_umask(SB)
-
GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8
DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB)
TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_unlink(SB)
-
GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8
DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB)
TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_unlinkat(SB)
-
GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8
DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB)
TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_unmount(SB)
-
GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8
DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB)
TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_write(SB)
-
GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8
DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB)
TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mmap(SB)
-
GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8
DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB)
TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_munmap(SB)
-
GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8
DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB)
TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_utimensat(SB)
-
GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8
DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB)
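The mips64 file below is the most invasive of the OpenBSD changes: regenerating it with mksyscall's new -libc flag rewrites every wrapper from a raw syscall number to an indirect call through the libc trampoline, and adds a trampoline-address variable plus a cgo_import_dynamic line per function. OpenBSD restricts where system calls may enter the kernel, so routing through libc.so is the supported path. Side by side, the shape of the rewrite as applied to the Close wrapper in the hunks below (closeBefore/closeAfter are illustrative names; the remaining identifiers are the vendored package's own):

// Before regeneration: trap directly into the kernel by syscall number.
func closeBefore(fd int) (err error) {
	_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}

// After regeneration: call libc's close via the trampoline address,
// keeping the binary on the libc ABI instead of raw traps.
func closeAfter(fd int) (err error) {
	_, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
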
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
index 016d959bc664..6f33e37e723f 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
@@ -1,4 +1,4 @@
-// go run mksyscall.go -openbsd -tags openbsd,mips64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_mips64.go
+// go run mksyscall.go -openbsd -libc -tags openbsd,mips64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_mips64.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build openbsd && mips64
@@ -16,7 +16,7 @@ var _ syscall.Errno
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getgroups(ngid int, gid *_Gid_t) (n int, err error) {
- r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
+ r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -24,20 +24,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) {
return
}
+var libc_getgroups_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getgroups getgroups "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func setgroups(ngid int, gid *_Gid_t) (err error) {
- _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
+ _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setgroups_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setgroups setgroups "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
- r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
+ r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
wpid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -45,10 +53,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err
return
}
+var libc_wait4_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_wait4 wait4 "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
- r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -56,30 +68,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
return
}
+var libc_accept_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_accept accept "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
- _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen))
+ _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_bind_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_bind bind "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
- _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen))
+ _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_connect_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_connect connect "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func socket(domain int, typ int, proto int) (fd int, err error) {
- r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto))
+ r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto))
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -87,66 +111,94 @@ func socket(domain int, typ int, proto int) (fd int, err error) {
return
}
+var libc_socket_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_socket socket "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
- _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
+ _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_getsockopt_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
- _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
+ _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setsockopt_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
- _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_getpeername_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getpeername getpeername "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
- _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_getsockname_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getsockname getsockname "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Shutdown(s int, how int) (err error) {
- _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0)
+ _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_shutdown_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_shutdown shutdown "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
- _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
+ _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_socketpair_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_socketpair socketpair "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
@@ -156,7 +208,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl
} else {
_p0 = unsafe.Pointer(&_zero)
}
- r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
+ r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -164,6 +216,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl
return
}
+var libc_recvfrom_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
@@ -173,17 +229,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (
} else {
_p0 = unsafe.Pointer(&_zero)
}
- _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
+ _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_sendto_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_sendto sendto "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+ r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -191,10 +251,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
return
}
+var libc_recvmsg_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+ r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -202,10 +266,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
return
}
+var libc_sendmsg_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) {
- r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout)))
+ r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -213,6 +281,10 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne
return
}
+var libc_kevent_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_kevent kevent "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func utimes(path string, timeval *[2]Timeval) (err error) {
@@ -221,27 +293,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0)
+ _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_utimes_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_utimes utimes "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func futimes(fd int, timeval *[2]Timeval) (err error) {
- _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0)
+ _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_futimes_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_futimes futimes "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
+ r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -249,6 +329,10 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
return
}
+var libc_poll_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_poll poll "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Madvise(b []byte, behav int) (err error) {
@@ -258,13 +342,17 @@ func Madvise(b []byte, behav int) (err error) {
} else {
_p0 = unsafe.Pointer(&_zero)
}
- _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav))
+ _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_madvise_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_madvise madvise "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mlock(b []byte) (err error) {
@@ -274,23 +362,31 @@ func Mlock(b []byte) (err error) {
} else {
_p0 = unsafe.Pointer(&_zero)
}
- _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0)
+ _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_mlock_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mlock mlock "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mlockall(flags int) (err error) {
- _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
+ _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_mlockall_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mlockall mlockall "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mprotect(b []byte, prot int) (err error) {
@@ -300,13 +396,17 @@ func Mprotect(b []byte, prot int) (err error) {
} else {
_p0 = unsafe.Pointer(&_zero)
}
- _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot))
+ _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_mprotect_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mprotect mprotect "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Msync(b []byte, flags int) (err error) {
@@ -316,13 +416,17 @@ func Msync(b []byte, flags int) (err error) {
} else {
_p0 = unsafe.Pointer(&_zero)
}
- _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags))
+ _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_msync_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_msync msync "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Munlock(b []byte) (err error) {
@@ -332,33 +436,45 @@ func Munlock(b []byte) (err error) {
} else {
_p0 = unsafe.Pointer(&_zero)
}
- _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
+ _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_munlock_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_munlock munlock "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Munlockall() (err error) {
- _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
+ _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_munlockall_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_munlockall munlockall "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe2(p *[2]_C_int, flags int) (err error) {
- _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
+ _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_pipe2_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getdents(fd int, buf []byte) (n int, err error) {
@@ -368,7 +484,7 @@ func Getdents(fd int, buf []byte) (n int, err error) {
} else {
_p0 = unsafe.Pointer(&_zero)
}
- r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
+ r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -376,6 +492,10 @@ func Getdents(fd int, buf []byte) (n int, err error) {
return
}
+var libc_getdents_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getdents getdents "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getcwd(buf []byte) (n int, err error) {
@@ -385,7 +505,7 @@ func Getcwd(buf []byte) (n int, err error) {
} else {
_p0 = unsafe.Pointer(&_zero)
}
- r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0)
+ r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -393,16 +513,24 @@ func Getcwd(buf []byte) (n int, err error) {
return
}
+var libc_getcwd_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getcwd getcwd "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ioctl(fd int, req uint, arg uintptr) (err error) {
- _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+ _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_ioctl_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
@@ -412,17 +540,21 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr)
} else {
_p0 = unsafe.Pointer(&_zero)
}
- _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
+ _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_sysctl_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_sysctl sysctl "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
- r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
+ r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -430,6 +562,10 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int,
return
}
+var libc_ppoll_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_ppoll ppoll "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Access(path string, mode uint32) (err error) {
@@ -438,23 +574,31 @@ func Access(path string, mode uint32) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_access_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_access access "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Adjtime(delta *Timeval, olddelta *Timeval) (err error) {
- _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0)
+ _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_adjtime_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_adjtime adjtime "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chdir(path string) (err error) {
@@ -463,13 +607,17 @@ func Chdir(path string) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_chdir_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_chdir chdir "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chflags(path string, flags int) (err error) {
@@ -478,13 +626,17 @@ func Chflags(path string, flags int) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_chflags_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_chflags chflags "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chmod(path string, mode uint32) (err error) {
@@ -493,13 +645,17 @@ func Chmod(path string, mode uint32) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_chmod_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_chmod chmod "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chown(path string, uid int, gid int) (err error) {
@@ -508,13 +664,17 @@ func Chown(path string, uid int, gid int) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
+ _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_chown_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_chown chown "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chroot(path string) (err error) {
@@ -523,27 +683,49 @@ func Chroot(path string) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_chroot_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_chroot chroot "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ClockGettime(clockid int32, time *Timespec) (err error) {
+ _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_clock_gettime_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Close(fd int) (err error) {
- _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
+ _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_close_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_close close "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Dup(fd int) (nfd int, err error) {
- r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0)
+ r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0)
nfd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -551,33 +733,49 @@ func Dup(fd int) (nfd int, err error) {
return
}
+var libc_dup_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_dup dup "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Dup2(from int, to int) (err error) {
- _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0)
+ _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_dup2_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_dup2 dup2 "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Dup3(from int, to int, flags int) (err error) {
- _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags))
+ _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_dup3_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_dup3 dup3 "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Exit(code int) {
- Syscall(SYS_EXIT, uintptr(code), 0, 0)
+ syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0)
return
}
+var libc_exit_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_exit exit "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
@@ -586,43 +784,59 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+ _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_faccessat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_faccessat faccessat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchdir(fd int) (err error) {
- _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0)
+ _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_fchdir_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fchdir fchdir "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchflags(fd int, flags int) (err error) {
- _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0)
+ _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_fchflags_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fchflags fchflags "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchmod(fd int, mode uint32) (err error) {
- _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0)
+ _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_fchmod_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fchmod fchmod "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
@@ -631,23 +845,31 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+ _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_fchmodat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchown(fd int, uid int, gid int) (err error) {
- _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
+ _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_fchown_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fchown fchown "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) {
@@ -656,27 +878,35 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0)
+ _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_fchownat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fchownat fchownat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Flock(fd int, how int) (err error) {
- _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0)
+ _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_flock_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_flock flock "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fpathconf(fd int, name int) (val int, err error) {
- r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0)
+ r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0)
val = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -684,16 +914,24 @@ func Fpathconf(fd int, name int) (val int, err error) {
return
}
+var libc_fpathconf_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstat(fd int, stat *Stat_t) (err error) {
- _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+ _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_fstat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fstat fstat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
@@ -702,71 +940,99 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+ _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_fstatat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fstatat fstatat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstatfs(fd int, stat *Statfs_t) (err error) {
- _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+ _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_fstatfs_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fsync(fd int) (err error) {
- _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
+ _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_fsync_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fsync fsync "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Ftruncate(fd int, length int64) (err error) {
- _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length))
+ _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_ftruncate_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getegid() (egid int) {
- r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0)
+ r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0)
egid = int(r0)
return
}
+var libc_getegid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getegid getegid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Geteuid() (uid int) {
- r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0)
+ r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0)
uid = int(r0)
return
}
+var libc_geteuid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_geteuid geteuid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getgid() (gid int) {
- r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0)
+ r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0)
gid = int(r0)
return
}
+var libc_getgid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getgid getgid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpgid(pid int) (pgid int, err error) {
- r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0)
+ r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0)
pgid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -774,34 +1040,50 @@ func Getpgid(pid int) (pgid int, err error) {
return
}
+var libc_getpgid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getpgid getpgid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpgrp() (pgrp int) {
- r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0)
+ r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0)
pgrp = int(r0)
return
}
+var libc_getpgrp_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpid() (pid int) {
- r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0)
+ r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0)
pid = int(r0)
return
}
+var libc_getpid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getpid getpid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getppid() (ppid int) {
- r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0)
+ r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0)
ppid = int(r0)
return
}
+var libc_getppid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getppid getppid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpriority(which int, who int) (prio int, err error) {
- r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0)
+ r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0)
prio = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -809,20 +1091,28 @@ func Getpriority(which int, who int) (prio int, err error) {
return
}
+var libc_getpriority_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getpriority getpriority "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getrlimit(which int, lim *Rlimit) (err error) {
- _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
+ _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_getrlimit_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getrtable() (rtable int, err error) {
- r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0)
+ r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0)
rtable = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -830,20 +1120,28 @@ func Getrtable() (rtable int, err error) {
return
}
+var libc_getrtable_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getrtable getrtable "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getrusage(who int, rusage *Rusage) (err error) {
- _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
+ _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_getrusage_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getrusage getrusage "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getsid(pid int) (sid int, err error) {
- r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0)
+ r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0)
sid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -851,46 +1149,66 @@ func Getsid(pid int) (sid int, err error) {
return
}
+var libc_getsid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getsid getsid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Gettimeofday(tv *Timeval) (err error) {
- _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
+ _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_gettimeofday_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getuid() (uid int) {
- r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0)
+ r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0)
uid = int(r0)
return
}
+var libc_getuid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getuid getuid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Issetugid() (tainted bool) {
- r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0)
+ r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0)
tainted = bool(r0 != 0)
return
}
+var libc_issetugid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_issetugid issetugid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Kill(pid int, signum syscall.Signal) (err error) {
- _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0)
+ _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_kill_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_kill kill "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Kqueue() (fd int, err error) {
- r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0)
+ r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0)
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -898,6 +1216,10 @@ func Kqueue() (fd int, err error) {
return
}
+var libc_kqueue_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_kqueue kqueue "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Lchown(path string, uid int, gid int) (err error) {
@@ -906,13 +1228,17 @@ func Lchown(path string, uid int, gid int) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
+ _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_lchown_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_lchown lchown "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Link(path string, link string) (err error) {
@@ -926,13 +1252,17 @@ func Link(path string, link string) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+ _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_link_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_link link "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) {
@@ -946,23 +1276,31 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er
if err != nil {
return
}
- _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0)
+ _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_linkat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_linkat linkat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Listen(s int, backlog int) (err error) {
- _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0)
+ _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_listen_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_listen listen "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Lstat(path string, stat *Stat_t) (err error) {
@@ -971,13 +1309,17 @@ func Lstat(path string, stat *Stat_t) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_lstat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_lstat lstat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkdir(path string, mode uint32) (err error) {
@@ -986,13 +1328,17 @@ func Mkdir(path string, mode uint32) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_mkdir_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mkdir mkdir "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkdirat(dirfd int, path string, mode uint32) (err error) {
@@ -1001,13 +1347,17 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
+ _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_mkdirat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkfifo(path string, mode uint32) (err error) {
@@ -1016,13 +1366,17 @@ func Mkfifo(path string, mode uint32) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_mkfifo_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkfifoat(dirfd int, path string, mode uint32) (err error) {
@@ -1031,13 +1385,17 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
+ _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_mkfifoat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mknod(path string, mode uint32, dev int) (err error) {
@@ -1046,13 +1404,17 @@ func Mknod(path string, mode uint32, dev int) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev))
+ _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_mknod_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mknod mknod "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) {
@@ -1061,23 +1423,31 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
+ _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_mknodat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mknodat mknodat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
- _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
+ _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_nanosleep_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Open(path string, mode int, perm uint32) (fd int, err error) {
@@ -1086,7 +1456,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) {
if err != nil {
return
}
- r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
+ r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1094,6 +1464,10 @@ func Open(path string, mode int, perm uint32) (fd int, err error) {
return
}
+var libc_open_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_open open "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) {
@@ -1102,7 +1476,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) {
if err != nil {
return
}
- r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0)
+ r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0)
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1110,6 +1484,10 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) {
return
}
+var libc_openat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_openat openat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Pathconf(path string, name int) (val int, err error) {
@@ -1118,7 +1496,7 @@ func Pathconf(path string, name int) (val int, err error) {
if err != nil {
return
}
- r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0)
+ r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0)
val = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1126,6 +1504,10 @@ func Pathconf(path string, name int) (val int, err error) {
return
}
+var libc_pathconf_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pathconf pathconf "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pread(fd int, p []byte, offset int64) (n int, err error) {
@@ -1135,7 +1517,7 @@ func pread(fd int, p []byte, offset int64) (n int, err error) {
} else {
_p0 = unsafe.Pointer(&_zero)
}
- r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0)
+ r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1143,6 +1525,10 @@ func pread(fd int, p []byte, offset int64) (n int, err error) {
return
}
+var libc_pread_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pread pread "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pwrite(fd int, p []byte, offset int64) (n int, err error) {
@@ -1152,7 +1538,7 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) {
} else {
_p0 = unsafe.Pointer(&_zero)
}
- r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0)
+ r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1160,6 +1546,10 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) {
return
}
+var libc_pwrite_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pwrite pwrite "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func read(fd int, p []byte) (n int, err error) {
@@ -1169,7 +1559,7 @@ func read(fd int, p []byte) (n int, err error) {
} else {
_p0 = unsafe.Pointer(&_zero)
}
- r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p)))
+ r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1177,6 +1567,10 @@ func read(fd int, p []byte) (n int, err error) {
return
}
+var libc_read_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_read read "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Readlink(path string, buf []byte) (n int, err error) {
@@ -1191,7 +1585,7 @@ func Readlink(path string, buf []byte) (n int, err error) {
} else {
_p1 = unsafe.Pointer(&_zero)
}
- r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)))
+ r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1199,6 +1593,10 @@ func Readlink(path string, buf []byte) (n int, err error) {
return
}
+var libc_readlink_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_readlink readlink "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
@@ -1213,7 +1611,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
} else {
_p1 = unsafe.Pointer(&_zero)
}
- r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0)
+ r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1221,6 +1619,10 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
return
}
+var libc_readlinkat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Rename(from string, to string) (err error) {
@@ -1234,13 +1636,17 @@ func Rename(from string, to string) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+ _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_rename_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_rename rename "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Renameat(fromfd int, from string, tofd int, to string) (err error) {
@@ -1254,13 +1660,17 @@ func Renameat(fromfd int, from string, tofd int, to string) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+ _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_renameat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_renameat renameat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Revoke(path string) (err error) {
@@ -1269,13 +1679,17 @@ func Revoke(path string) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_revoke_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_revoke revoke "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Rmdir(path string) (err error) {
@@ -1284,17 +1698,21 @@ func Rmdir(path string) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_rmdir_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_rmdir rmdir "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
- r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0)
+ r0, _, e1 := syscall_syscall(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(whence))
newoffset = int64(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1302,10 +1720,14 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
return
}
+var libc_lseek_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_lseek lseek "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
- r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
+ r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1313,36 +1735,52 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
return
}
+var libc_select_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_select select "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setegid(egid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0)
+ _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setegid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setegid setegid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Seteuid(euid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0)
+ _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_seteuid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_seteuid seteuid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setgid(gid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0)
+ _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setgid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setgid setgid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setlogin(name string) (err error) {
@@ -1351,97 +1789,133 @@ func Setlogin(name string) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setlogin_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setlogin setlogin "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setpgid(pid int, pgid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0)
+ _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setpgid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setpgid setpgid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setpriority(which int, who int, prio int) (err error) {
- _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
+ _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setpriority_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setpriority setpriority "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setregid(rgid int, egid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
+ _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setregid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setregid setregid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setreuid(ruid int, euid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
+ _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setreuid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setreuid setreuid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setresgid(rgid int, egid int, sgid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
+ _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setresgid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setresgid setresgid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setresuid(ruid int, euid int, suid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
+ _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setresuid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setresuid setresuid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setrlimit(which int, lim *Rlimit) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
+ _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setrlimit_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setrtable(rtable int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0)
+ _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setrtable_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setrtable setrtable "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setsid() (pid int, err error) {
- r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0)
+ r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0)
pid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1449,26 +1923,38 @@ func Setsid() (pid int, err error) {
return
}
+var libc_setsid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setsid setsid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Settimeofday(tp *Timeval) (err error) {
- _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0)
+ _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_settimeofday_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setuid(uid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0)
+ _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setuid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setuid setuid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Stat(path string, stat *Stat_t) (err error) {
@@ -1477,13 +1963,17 @@ func Stat(path string, stat *Stat_t) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_stat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_stat stat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Statfs(path string, stat *Statfs_t) (err error) {
@@ -1492,13 +1982,17 @@ func Statfs(path string, stat *Statfs_t) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_statfs_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_statfs statfs "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Symlink(path string, link string) (err error) {
@@ -1512,13 +2006,17 @@ func Symlink(path string, link string) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+ _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_symlink_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_symlink symlink "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
@@ -1532,23 +2030,31 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)))
+ _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_symlinkat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Sync() (err error) {
- _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0)
+ _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_sync_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_sync sync "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Truncate(path string, length int64) (err error) {
@@ -1557,21 +2063,29 @@ func Truncate(path string, length int64) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length))
+ _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_truncate_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_truncate truncate "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Umask(newmask int) (oldmask int) {
- r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0)
+ r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0)
oldmask = int(r0)
return
}
+var libc_umask_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_umask umask "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Unlink(path string) (err error) {
@@ -1580,13 +2094,17 @@ func Unlink(path string) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_unlink_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_unlink unlink "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Unlinkat(dirfd int, path string, flags int) (err error) {
@@ -1595,13 +2113,17 @@ func Unlinkat(dirfd int, path string, flags int) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags))
+ _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_unlinkat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Unmount(path string, flags int) (err error) {
@@ -1610,13 +2132,17 @@ func Unmount(path string, flags int) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_unmount_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_unmount unmount "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func write(fd int, p []byte) (n int, err error) {
@@ -1626,7 +2152,7 @@ func write(fd int, p []byte) (n int, err error) {
} else {
_p0 = unsafe.Pointer(&_zero)
}
- r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)))
+ r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1634,10 +2160,14 @@ func write(fd int, p []byte) (n int, err error) {
return
}
+var libc_write_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_write write "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) {
- r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0)
+ r0, _, e1 := syscall_syscall6(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos))
ret = uintptr(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1645,20 +2175,28 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (
return
}
+var libc_mmap_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mmap mmap "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func munmap(addr uintptr, length uintptr) (err error) {
- _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
+ _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_munmap_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_munmap munmap "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
+ r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1669,7 +2207,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
+ r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1685,9 +2223,13 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error
if err != nil {
return
}
- _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0)
+ _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+
+var libc_utimensat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_utimensat utimensat "libc.so"
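Note on the pattern above: every hunk in zsyscall_openbsd_mips64.go makes the same mechanical change — the wrapper stops trapping into the kernel with a raw SYS_* number and instead calls through a libc function, reached via a trampoline whose address the linker resolves from a //go:cgo_import_dynamic directive. A minimal sketch of the resulting shape, using the Close wrapper from the hunks above (illustrative only; syscall_syscall is assumed to be linkname'd elsewhere in the package to the runtime's libc-call helper):

    //go:cgo_import_dynamic libc_close close "libc.so"

    // libc_close_trampoline_addr holds the address of the assembly
    // trampoline; the runtime helper calls through it, and the
    // trampoline jumps into the dynamically resolved libc close.
    var libc_close_trampoline_addr uintptr

    func Close(fd int) (err error) {
        _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0)
        if e1 != 0 {
            err = errnoErr(e1)
        }
        return
    }

The matching trampolines live in the new assembly file introduced below: each is a bare JMP to the imported symbol, with its address exported to Go through a GLOBL/DATA pair (see, for example, the libc_close_trampoline block in zsyscall_openbsd_mips64.s).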
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s
new file mode 100644
index 000000000000..55af27263ad7
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s
@@ -0,0 +1,669 @@
+// go run mkasm.go openbsd mips64
+// Code generated by the command above; DO NOT EDIT.
+
+#include "textflag.h"
+
+TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getgroups(SB)
+GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB)
+
+TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setgroups(SB)
+GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8
+DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB)
+
+TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_wait4(SB)
+GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8
+DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB)
+
+TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_accept(SB)
+GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8
+DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB)
+
+TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_bind(SB)
+GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8
+DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB)
+
+TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_connect(SB)
+GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8
+DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB)
+
+TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_socket(SB)
+GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8
+DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB)
+
+TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getsockopt(SB)
+GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB)
+
+TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setsockopt(SB)
+GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8
+DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB)
+
+TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getpeername(SB)
+GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB)
+
+TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getsockname(SB)
+GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB)
+
+TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_shutdown(SB)
+GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8
+DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB)
+
+TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_socketpair(SB)
+GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8
+DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB)
+
+TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_recvfrom(SB)
+GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8
+DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB)
+
+TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_sendto(SB)
+GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8
+DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB)
+
+TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_recvmsg(SB)
+GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8
+DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB)
+
+TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_sendmsg(SB)
+GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8
+DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB)
+
+TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_kevent(SB)
+GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8
+DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB)
+
+TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_utimes(SB)
+GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8
+DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB)
+
+TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_futimes(SB)
+GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8
+DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB)
+
+TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_poll(SB)
+GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8
+DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB)
+
+TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_madvise(SB)
+GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8
+DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB)
+
+TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mlock(SB)
+GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8
+DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB)
+
+TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mlockall(SB)
+GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8
+DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB)
+
+TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mprotect(SB)
+GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8
+DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB)
+
+TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_msync(SB)
+GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8
+DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB)
+
+TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_munlock(SB)
+GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8
+DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB)
+
+TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_munlockall(SB)
+GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8
+DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB)
+
+TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_pipe2(SB)
+GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8
+DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB)
+
+TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getdents(SB)
+GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB)
+
+TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getcwd(SB)
+GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB)
+
+TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_ioctl(SB)
+GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8
+DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB)
+
+TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_sysctl(SB)
+GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8
+DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
+
+TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_ppoll(SB)
+GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8
+DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB)
+
+TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_access(SB)
+GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8
+DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB)
+
+TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_adjtime(SB)
+GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8
+DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB)
+
+TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_chdir(SB)
+GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8
+DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB)
+
+TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_chflags(SB)
+GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8
+DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB)
+
+TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_chmod(SB)
+GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8
+DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB)
+
+TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_chown(SB)
+GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8
+DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB)
+
+TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_chroot(SB)
+GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8
+DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB)
+
+TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_clock_gettime(SB)
+GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8
+DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB)
+
+TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_close(SB)
+GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8
+DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB)
+
+TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_dup(SB)
+GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8
+DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB)
+
+TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_dup2(SB)
+GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8
+DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB)
+
+TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_dup3(SB)
+GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8
+DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB)
+
+TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_exit(SB)
+GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8
+DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB)
+
+TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_faccessat(SB)
+GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8
+DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB)
+
+TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_fchdir(SB)
+GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8
+DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB)
+
+TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_fchflags(SB)
+GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8
+DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB)
+
+TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_fchmod(SB)
+GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8
+DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB)
+
+TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_fchmodat(SB)
+GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8
+DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB)
+
+TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_fchown(SB)
+GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8
+DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB)
+
+TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_fchownat(SB)
+GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8
+DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB)
+
+TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_flock(SB)
+GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8
+DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB)
+
+TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_fpathconf(SB)
+GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8
+DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB)
+
+TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_fstat(SB)
+GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8
+DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB)
+
+TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_fstatat(SB)
+GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8
+DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB)
+
+TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_fstatfs(SB)
+GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8
+DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB)
+
+TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_fsync(SB)
+GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8
+DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB)
+
+TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_ftruncate(SB)
+GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8
+DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB)
+
+TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getegid(SB)
+GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB)
+
+TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_geteuid(SB)
+GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB)
+
+TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getgid(SB)
+GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB)
+
+TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getpgid(SB)
+GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB)
+
+TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getpgrp(SB)
+GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB)
+
+TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getpid(SB)
+GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB)
+
+TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getppid(SB)
+GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB)
+
+TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getpriority(SB)
+GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB)
+
+TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getrlimit(SB)
+GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB)
+
+TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getrtable(SB)
+GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB)
+
+TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getrusage(SB)
+GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB)
+
+TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getsid(SB)
+GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB)
+
+TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_gettimeofday(SB)
+GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8
+DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB)
+
+TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getuid(SB)
+GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB)
+
+TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_issetugid(SB)
+GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB)
+
+TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_kill(SB)
+GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8
+DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB)
+
+TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_kqueue(SB)
+GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8
+DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB)
+
+TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_lchown(SB)
+GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8
+DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB)
+
+TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_link(SB)
+GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8
+DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB)
+
+TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_linkat(SB)
+GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8
+DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB)
+
+TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_listen(SB)
+GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8
+DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB)
+
+TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_lstat(SB)
+GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8
+DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB)
+
+TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mkdir(SB)
+GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8
+DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB)
+
+TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mkdirat(SB)
+GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8
+DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB)
+
+TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mkfifo(SB)
+GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8
+DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB)
+
+TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mkfifoat(SB)
+GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8
+DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB)
+
+TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mknod(SB)
+GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8
+DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB)
+
+TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mknodat(SB)
+GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8
+DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB)
+
+TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_nanosleep(SB)
+GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8
+DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB)
+
+TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_open(SB)
+GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8
+DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB)
+
+TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_openat(SB)
+GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8
+DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB)
+
+TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_pathconf(SB)
+GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8
+DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB)
+
+TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_pread(SB)
+GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8
+DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB)
+
+TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_pwrite(SB)
+GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8
+DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB)
+
+TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_read(SB)
+GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8
+DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB)
+
+TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_readlink(SB)
+GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8
+DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB)
+
+TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_readlinkat(SB)
+GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8
+DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB)
+
+TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_rename(SB)
+GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8
+DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB)
+
+TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_renameat(SB)
+GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8
+DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB)
+
+TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_revoke(SB)
+GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8
+DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB)
+
+TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_rmdir(SB)
+GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8
+DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB)
+
+TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_lseek(SB)
+GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8
+DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB)
+
+TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_select(SB)
+GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8
+DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB)
+
+TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setegid(SB)
+GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB)
+
+TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_seteuid(SB)
+GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB)
+
+TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setgid(SB)
+GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB)
+
+TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setlogin(SB)
+GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8
+DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB)
+
+TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setpgid(SB)
+GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB)
+
+TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setpriority(SB)
+GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8
+DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB)
+
+TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setregid(SB)
+GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB)
+
+TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setreuid(SB)
+GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB)
+
+TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setresgid(SB)
+GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB)
+
+TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setresuid(SB)
+GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB)
+
+TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setrlimit(SB)
+GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8
+DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB)
+
+TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setrtable(SB)
+GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8
+DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB)
+
+TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setsid(SB)
+GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB)
+
+TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_settimeofday(SB)
+GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8
+DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB)
+
+TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setuid(SB)
+GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB)
+
+TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_stat(SB)
+GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8
+DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB)
+
+TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_statfs(SB)
+GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8
+DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB)
+
+TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_symlink(SB)
+GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8
+DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB)
+
+TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_symlinkat(SB)
+GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8
+DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB)
+
+TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_sync(SB)
+GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8
+DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB)
+
+TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_truncate(SB)
+GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8
+DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB)
+
+TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_umask(SB)
+GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8
+DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB)
+
+TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_unlink(SB)
+GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8
+DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB)
+
+TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_unlinkat(SB)
+GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8
+DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB)
+
+TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_unmount(SB)
+GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8
+DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB)
+
+TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_write(SB)
+GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8
+DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB)
+
+TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mmap(SB)
+GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8
+DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB)
+
+TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_munmap(SB)
+GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8
+DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB)
+
+TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_utimensat(SB)
+GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8
+DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB)
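
Note: the additions above repeat a single three-directive pattern once per libc symbol. A distilled template, taken directly from the hunks (FN stands in for the function name):

    TEXT libc_FN_trampoline<>(SB),NOSPLIT,$0-0
            JMP     libc_FN(SB)        // tail-jump into the dynamically resolved libc symbol
    GLOBL   ·libc_FN_trampoline_addr(SB), RODATA, $8
    DATA    ·libc_FN_trampoline_addr(SB)/8, $libc_FN_trampoline<>(SB)  // publish the trampoline's address for the Go wrapper

The Go side of each generated wrapper loads libc_FN_trampoline_addr and hands it to syscall_syscall, so every call on these OpenBSD ports goes through libc rather than raw syscall instructions.
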
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go
index c85de2d9766b..330cf7f7ac66 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go
@@ -696,6 +696,20 @@ var libc_chroot_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockGettime(clockid int32, time *Timespec) (err error) {
+ _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_clock_gettime_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0)
if e1 != 0 {
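
The new ClockGettime wrapper above dispatches through libc_clock_gettime_trampoline_addr, which the matching .s hunk below publishes. From application code the call is just the exported helper; a minimal usage sketch, assuming a platform where golang.org/x/sys/unix provides ClockGettime as added here:

    package main

    import (
            "fmt"

            "golang.org/x/sys/unix"
    )

    func main() {
            var ts unix.Timespec
            // CLOCK_MONOTONIC is one of the clockid constants defined in the
            // platform's zerrors file; any supported clock id works here.
            if err := unix.ClockGettime(unix.CLOCK_MONOTONIC, &ts); err != nil {
                    fmt.Println("clock_gettime failed:", err)
                    return
            }
            fmt.Printf("monotonic: %d.%09ds\n", ts.Sec, ts.Nsec)
    }
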
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s
index 7c9223b64187..4028255b0d5b 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s
@@ -249,6 +249,12 @@ TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8
DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB)
+TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0
+ CALL libc_clock_gettime(SB)
+ RET
+GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8
+DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB)
+
TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0
CALL libc_close(SB)
RET
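
One detail worth noting in the ppc64 hunk above: unlike the other OpenBSD ports, the ppc64 trampolines use an explicit CALL followed by RET rather than a tail JMP, presumably because a plain tail jump would not preserve the PPC64 calling convention (link-register and TOC handling) across the dynamically resolved call. The two shapes as they appear in this diff:

    // most OpenBSD ports: tail jump
    TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0
            JMP     libc_close(SB)

    // openbsd/ppc64: call and return
    TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0
            CALL    libc_close(SB)
            RET
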
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go
index 8e3e7873f893..5f24de0d9d76 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go
@@ -696,6 +696,20 @@ var libc_chroot_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockGettime(clockid int32, time *Timespec) (err error) {
+ _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_clock_gettime_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0)
if e1 != 0 {
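
Both BSD wrappers convert the raw errno through errnoErr rather than returning the Errno value directly. A minimal sketch of that idiom, following the real helper in x/sys/unix (reconstructed from memory, so treat the exact set of cached values as illustrative):

    package unix

    import "syscall"

    // Cached error values for the most common errnos, so a failed call
    // does not allocate a new error interface value each time.
    var (
            errEAGAIN error = syscall.EAGAIN
            errEINVAL error = syscall.EINVAL
            errENOENT error = syscall.ENOENT
    )

    // errnoErr maps 0 to nil, reuses the cached values above for the
    // common cases, and otherwise returns the Errno itself.
    func errnoErr(e syscall.Errno) error {
            switch e {
            case 0:
                    return nil
            case syscall.EAGAIN:
                    return errEAGAIN
            case syscall.EINVAL:
                    return errEINVAL
            case syscall.ENOENT:
                    return errENOENT
            }
            return e
    }
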
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s
index 7dba789271ca..e1fbd4dfa8c8 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s
@@ -5,792 +5,665 @@
TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getgroups(SB)
-
GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8
DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB)
TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setgroups(SB)
-
GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8
DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB)
TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_wait4(SB)
-
GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8
DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB)
TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_accept(SB)
-
GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8
DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB)
TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_bind(SB)
-
GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8
DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB)
TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_connect(SB)
-
GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8
DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB)
TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_socket(SB)
-
GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8
DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB)
TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getsockopt(SB)
-
GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8
DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB)
TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setsockopt(SB)
-
GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8
DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB)
TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpeername(SB)
-
GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8
DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB)
TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getsockname(SB)
-
GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8
DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB)
TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_shutdown(SB)
-
GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8
DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB)
TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_socketpair(SB)
-
GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8
DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB)
TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_recvfrom(SB)
-
GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8
DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB)
TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendto(SB)
-
GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8
DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB)
TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_recvmsg(SB)
-
GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8
DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB)
TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendmsg(SB)
-
GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8
DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB)
TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_kevent(SB)
-
GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8
DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB)
TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_utimes(SB)
-
GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8
DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB)
TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_futimes(SB)
-
GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8
DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB)
TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_poll(SB)
-
GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8
DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB)
TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_madvise(SB)
-
GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8
DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB)
TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mlock(SB)
-
GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8
DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB)
TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mlockall(SB)
-
GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8
DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB)
TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mprotect(SB)
-
GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8
DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB)
TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_msync(SB)
-
GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8
DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB)
TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_munlock(SB)
-
GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8
DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB)
TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_munlockall(SB)
-
GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8
DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB)
TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pipe2(SB)
-
GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8
DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB)
TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getdents(SB)
-
GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8
DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB)
TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getcwd(SB)
-
GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8
DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB)
TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ioctl(SB)
-
GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8
DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB)
TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sysctl(SB)
-
GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8
DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ppoll(SB)
-
GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8
DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB)
TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_access(SB)
-
GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8
DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB)
TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_adjtime(SB)
-
GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8
DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB)
TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chdir(SB)
-
GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8
DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB)
TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chflags(SB)
-
GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8
DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB)
TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chmod(SB)
-
GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8
DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB)
TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chown(SB)
-
GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8
DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB)
TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chroot(SB)
-
GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8
DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB)
+TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_clock_gettime(SB)
+GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8
+DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB)
+
TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_close(SB)
-
GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8
DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB)
TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_dup(SB)
-
GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8
DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB)
TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_dup2(SB)
-
GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8
DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB)
TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_dup3(SB)
-
GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8
DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB)
TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_exit(SB)
-
GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8
DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB)
TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_faccessat(SB)
-
GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8
DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB)
TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchdir(SB)
-
GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB)
TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchflags(SB)
-
GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB)
TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchmod(SB)
-
GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB)
TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchmodat(SB)
-
GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB)
TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchown(SB)
-
GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB)
TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchownat(SB)
-
GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB)
TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_flock(SB)
-
GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8
DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB)
TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fpathconf(SB)
-
GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8
DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB)
TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstat(SB)
-
GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8
DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB)
TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstatat(SB)
-
GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8
DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB)
TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstatfs(SB)
-
GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8
DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB)
TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fsync(SB)
-
GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8
DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB)
TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ftruncate(SB)
-
GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8
DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB)
TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getegid(SB)
-
GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB)
TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_geteuid(SB)
-
GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB)
TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getgid(SB)
-
GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB)
TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpgid(SB)
-
GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB)
TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpgrp(SB)
-
GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8
DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB)
TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpid(SB)
-
GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB)
TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getppid(SB)
-
GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB)
TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpriority(SB)
-
GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8
DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB)
TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getrlimit(SB)
-
GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8
DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB)
TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getrtable(SB)
-
GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8
DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB)
TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getrusage(SB)
-
GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8
DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB)
TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getsid(SB)
-
GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB)
TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_gettimeofday(SB)
-
GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8
DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB)
TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getuid(SB)
-
GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB)
TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_issetugid(SB)
-
GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8
DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB)
TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_kill(SB)
-
GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8
DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB)
TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_kqueue(SB)
-
GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8
DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB)
TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_lchown(SB)
-
GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8
DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB)
TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_link(SB)
-
GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8
DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB)
TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_linkat(SB)
-
GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8
DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB)
TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_listen(SB)
-
GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8
DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB)
TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_lstat(SB)
-
GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8
DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB)
TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkdir(SB)
-
GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8
DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB)
TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkdirat(SB)
-
GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8
DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB)
TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkfifo(SB)
-
GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8
DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB)
TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkfifoat(SB)
-
GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8
DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB)
TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mknod(SB)
-
GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8
DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB)
TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mknodat(SB)
-
GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8
DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB)
TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_nanosleep(SB)
-
GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8
DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB)
TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_open(SB)
-
GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8
DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB)
TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_openat(SB)
-
GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8
DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB)
TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pathconf(SB)
-
GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8
DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB)
TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pread(SB)
-
GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8
DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB)
TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pwrite(SB)
-
GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8
DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB)
TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_read(SB)
-
GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8
DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB)
TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_readlink(SB)
-
GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8
DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB)
TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_readlinkat(SB)
-
GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8
DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB)
TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_rename(SB)
-
GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8
DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB)
TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_renameat(SB)
-
GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8
DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB)
TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_revoke(SB)
-
GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8
DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB)
TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_rmdir(SB)
-
GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8
DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB)
TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_lseek(SB)
-
GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8
DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB)
TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_select(SB)
-
GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8
DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB)
TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setegid(SB)
-
GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB)
TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_seteuid(SB)
-
GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB)
TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setgid(SB)
-
GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB)
TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setlogin(SB)
-
GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8
DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB)
TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setpgid(SB)
-
GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB)
TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setpriority(SB)
-
GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8
DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB)
TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setregid(SB)
-
GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB)
TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setreuid(SB)
-
GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB)
TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setresgid(SB)
-
GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB)
TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setresuid(SB)
-
GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB)
TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setrlimit(SB)
-
GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8
DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB)
TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setrtable(SB)
-
GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8
DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB)
TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setsid(SB)
-
GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB)
TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_settimeofday(SB)
-
GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8
DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB)
TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setuid(SB)
-
GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB)
TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_stat(SB)
-
GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8
DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB)
TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_statfs(SB)
-
GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8
DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB)
TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_symlink(SB)
-
GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8
DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB)
TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_symlinkat(SB)
-
GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8
DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB)
TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sync(SB)
-
GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8
DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB)
TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_truncate(SB)
-
GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8
DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB)
TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_umask(SB)
-
GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8
DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB)
TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_unlink(SB)
-
GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8
DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB)
TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_unlinkat(SB)
-
GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8
DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB)
TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_unmount(SB)
-
GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8
DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB)
TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_write(SB)
-
GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8
DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB)
TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mmap(SB)
-
GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8
DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB)
TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_munmap(SB)
-
GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8
DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB)
TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_utimensat(SB)
-
GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8
DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB)
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
index 91f5a2bde282..78d4a4240e9c 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
@@ -38,6 +38,7 @@ import (
//go:cgo_import_dynamic libc_chmod chmod "libc.so"
//go:cgo_import_dynamic libc_chown chown "libc.so"
//go:cgo_import_dynamic libc_chroot chroot "libc.so"
+//go:cgo_import_dynamic libc_clockgettime clockgettime "libc.so"
//go:cgo_import_dynamic libc_close close "libc.so"
//go:cgo_import_dynamic libc_creat creat "libc.so"
//go:cgo_import_dynamic libc_dup dup "libc.so"
@@ -177,6 +178,7 @@ import (
//go:linkname procChmod libc_chmod
//go:linkname procChown libc_chown
//go:linkname procChroot libc_chroot
+//go:linkname procClockGettime libc_clockgettime
//go:linkname procClose libc_close
//go:linkname procCreat libc_creat
//go:linkname procDup libc_dup
@@ -317,6 +319,7 @@ var (
procChmod,
procChown,
procChroot,
+ procClockGettime,
procClose,
procCreat,
procDup,
@@ -750,6 +753,16 @@ func Chroot(path string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockGettime(clockid int32, time *Timespec) (err error) {
+ _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClockGettime)), 2, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClose)), 1, uintptr(fd), 0, 0, 0, 0, 0)
if e1 != 0 {
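
Solaris takes a different route from the BSD ports above: there are no assembly trampolines. Each libc function is imported with //go:cgo_import_dynamic, bound to a proc variable via //go:linkname, registered in the var block of proc addresses (the third hunk), and invoked through sysvicall6 with an explicit argument count. Note also that the wrapper assigns e1 (a syscall.Errno) to err directly instead of going through errnoErr. A condensed sketch of the plumbing, using the names from this hunk (the libcFunc slot type is declared in the package's non-generated Solaris support code — an assumption here, since it is not shown in this diff):

    //go:cgo_import_dynamic libc_clockgettime clockgettime "libc.so"
    //go:linkname procClockGettime libc_clockgettime

    // procClockGettime is resolved by the runtime's dynamic loader support;
    // sysvicall6 receives its address plus the real argument count (2 here).
    var procClockGettime libcFunc
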
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go
index 9e9d0b2a9c45..55e0484719c4 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go
@@ -17,6 +17,7 @@ var sysctlMib = []mibentry{
{"ddb.max_line", []_C_int{9, 3}},
{"ddb.max_width", []_C_int{9, 2}},
{"ddb.panic", []_C_int{9, 5}},
+ {"ddb.profile", []_C_int{9, 9}},
{"ddb.radix", []_C_int{9, 1}},
{"ddb.tab_stop_width", []_C_int{9, 4}},
{"ddb.trigger", []_C_int{9, 8}},
@@ -33,29 +34,37 @@ var sysctlMib = []mibentry{
{"hw.ncpufound", []_C_int{6, 21}},
{"hw.ncpuonline", []_C_int{6, 25}},
{"hw.pagesize", []_C_int{6, 7}},
+ {"hw.perfpolicy", []_C_int{6, 23}},
{"hw.physmem", []_C_int{6, 19}},
+ {"hw.power", []_C_int{6, 26}},
{"hw.product", []_C_int{6, 15}},
{"hw.serialno", []_C_int{6, 17}},
{"hw.setperf", []_C_int{6, 13}},
+ {"hw.smt", []_C_int{6, 24}},
{"hw.usermem", []_C_int{6, 20}},
{"hw.uuid", []_C_int{6, 18}},
{"hw.vendor", []_C_int{6, 14}},
{"hw.version", []_C_int{6, 16}},
- {"kern.arandom", []_C_int{1, 37}},
+ {"kern.allowdt", []_C_int{1, 65}},
+ {"kern.allowkmem", []_C_int{1, 52}},
{"kern.argmax", []_C_int{1, 8}},
+ {"kern.audio", []_C_int{1, 84}},
{"kern.boottime", []_C_int{1, 21}},
{"kern.bufcachepercent", []_C_int{1, 72}},
{"kern.ccpu", []_C_int{1, 45}},
{"kern.clockrate", []_C_int{1, 12}},
+ {"kern.consbuf", []_C_int{1, 83}},
+ {"kern.consbufsize", []_C_int{1, 82}},
{"kern.consdev", []_C_int{1, 75}},
{"kern.cp_time", []_C_int{1, 40}},
{"kern.cp_time2", []_C_int{1, 71}},
- {"kern.cryptodevallowsoft", []_C_int{1, 53}},
+ {"kern.cpustats", []_C_int{1, 85}},
{"kern.domainname", []_C_int{1, 22}},
{"kern.file", []_C_int{1, 73}},
{"kern.forkstat", []_C_int{1, 42}},
{"kern.fscale", []_C_int{1, 46}},
{"kern.fsync", []_C_int{1, 33}},
+ {"kern.global_ptrace", []_C_int{1, 81}},
{"kern.hostid", []_C_int{1, 11}},
{"kern.hostname", []_C_int{1, 10}},
{"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}},
@@ -78,17 +87,16 @@ var sysctlMib = []mibentry{
{"kern.ngroups", []_C_int{1, 18}},
{"kern.nosuidcoredump", []_C_int{1, 32}},
{"kern.nprocs", []_C_int{1, 47}},
- {"kern.nselcoll", []_C_int{1, 43}},
{"kern.nthreads", []_C_int{1, 26}},
{"kern.numvnodes", []_C_int{1, 58}},
{"kern.osrelease", []_C_int{1, 2}},
{"kern.osrevision", []_C_int{1, 3}},
{"kern.ostype", []_C_int{1, 1}},
{"kern.osversion", []_C_int{1, 27}},
+ {"kern.pfstatus", []_C_int{1, 86}},
{"kern.pool_debug", []_C_int{1, 77}},
{"kern.posix1version", []_C_int{1, 17}},
{"kern.proc", []_C_int{1, 66}},
- {"kern.random", []_C_int{1, 31}},
{"kern.rawpartition", []_C_int{1, 24}},
{"kern.saved_ids", []_C_int{1, 20}},
{"kern.securelevel", []_C_int{1, 9}},
@@ -106,21 +114,20 @@ var sysctlMib = []mibentry{
{"kern.timecounter.hardware", []_C_int{1, 69, 3}},
{"kern.timecounter.tick", []_C_int{1, 69, 1}},
{"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}},
- {"kern.tty.maxptys", []_C_int{1, 44, 6}},
- {"kern.tty.nptys", []_C_int{1, 44, 7}},
+ {"kern.timeout_stats", []_C_int{1, 87}},
{"kern.tty.tk_cancc", []_C_int{1, 44, 4}},
{"kern.tty.tk_nin", []_C_int{1, 44, 1}},
{"kern.tty.tk_nout", []_C_int{1, 44, 2}},
{"kern.tty.tk_rawcc", []_C_int{1, 44, 3}},
{"kern.tty.ttyinfo", []_C_int{1, 44, 5}},
{"kern.ttycount", []_C_int{1, 57}},
- {"kern.userasymcrypto", []_C_int{1, 60}},
- {"kern.usercrypto", []_C_int{1, 52}},
- {"kern.usermount", []_C_int{1, 30}},
+ {"kern.utc_offset", []_C_int{1, 88}},
{"kern.version", []_C_int{1, 4}},
- {"kern.vnode", []_C_int{1, 13}},
+ {"kern.video", []_C_int{1, 89}},
{"kern.watchdog.auto", []_C_int{1, 64, 2}},
{"kern.watchdog.period", []_C_int{1, 64, 1}},
+ {"kern.witnesswatch", []_C_int{1, 53}},
+ {"kern.wxabort", []_C_int{1, 74}},
{"net.bpf.bufsize", []_C_int{4, 31, 1}},
{"net.bpf.maxbufsize", []_C_int{4, 31, 2}},
{"net.inet.ah.enable", []_C_int{4, 2, 51, 1}},
@@ -148,7 +155,9 @@ var sysctlMib = []mibentry{
{"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}},
{"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}},
{"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}},
+ {"net.inet.ip.arpdown", []_C_int{4, 2, 0, 40}},
{"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}},
+ {"net.inet.ip.arptimeout", []_C_int{4, 2, 0, 39}},
{"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}},
{"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}},
{"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}},
@@ -157,8 +166,10 @@ var sysctlMib = []mibentry{
{"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}},
{"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}},
{"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}},
+ {"net.inet.ip.mrtmfc", []_C_int{4, 2, 0, 37}},
{"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}},
{"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}},
+ {"net.inet.ip.mrtvif", []_C_int{4, 2, 0, 38}},
{"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}},
{"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}},
{"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}},
@@ -175,9 +186,7 @@ var sysctlMib = []mibentry{
{"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}},
{"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}},
{"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}},
- {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}},
{"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}},
- {"net.inet.pim.stats", []_C_int{4, 2, 103, 1}},
{"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}},
{"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}},
{"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}},
@@ -191,6 +200,7 @@ var sysctlMib = []mibentry{
{"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}},
{"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}},
{"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}},
+ {"net.inet.tcp.rootonly", []_C_int{4, 2, 6, 24}},
{"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}},
{"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}},
{"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}},
@@ -198,9 +208,12 @@ var sysctlMib = []mibentry{
{"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}},
{"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}},
{"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}},
+ {"net.inet.tcp.synhashsize", []_C_int{4, 2, 6, 25}},
+ {"net.inet.tcp.synuselimit", []_C_int{4, 2, 6, 23}},
{"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}},
{"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}},
{"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}},
+ {"net.inet.udp.rootonly", []_C_int{4, 2, 17, 6}},
{"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}},
{"net.inet.udp.stats", []_C_int{4, 2, 17, 5}},
{"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}},
@@ -213,13 +226,8 @@ var sysctlMib = []mibentry{
{"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}},
{"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}},
{"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}},
- {"net.inet6.icmp6.nd6_prune", []_C_int{4, 24, 30, 6}},
{"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}},
- {"net.inet6.icmp6.nd6_useloopback", []_C_int{4, 24, 30, 11}},
- {"net.inet6.icmp6.nodeinfo", []_C_int{4, 24, 30, 13}},
- {"net.inet6.icmp6.rediraccept", []_C_int{4, 24, 30, 2}},
{"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}},
- {"net.inet6.ip6.accept_rtadv", []_C_int{4, 24, 17, 12}},
{"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}},
{"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}},
{"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}},
@@ -232,20 +240,19 @@ var sysctlMib = []mibentry{
{"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}},
{"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}},
{"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}},
- {"net.inet6.ip6.maxifdefrouters", []_C_int{4, 24, 17, 47}},
- {"net.inet6.ip6.maxifprefixes", []_C_int{4, 24, 17, 46}},
{"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}},
+ {"net.inet6.ip6.mrtmfc", []_C_int{4, 24, 17, 53}},
+ {"net.inet6.ip6.mrtmif", []_C_int{4, 24, 17, 52}},
{"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}},
{"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}},
{"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}},
{"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}},
{"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}},
{"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}},
- {"net.inet6.ip6.rr_prune", []_C_int{4, 24, 17, 22}},
+ {"net.inet6.ip6.soiikey", []_C_int{4, 24, 17, 54}},
{"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}},
{"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}},
{"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}},
- {"net.inet6.ip6.v6only", []_C_int{4, 24, 17, 24}},
{"net.key.sadb_dump", []_C_int{4, 30, 1}},
{"net.key.spd_dump", []_C_int{4, 30, 2}},
{"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}},
@@ -254,12 +261,12 @@ var sysctlMib = []mibentry{
{"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}},
{"net.mpls.mapttl_ip", []_C_int{4, 33, 5}},
{"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}},
- {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}},
{"net.mpls.ttl", []_C_int{4, 33, 2}},
{"net.pflow.stats", []_C_int{4, 34, 1}},
{"net.pipex.enable", []_C_int{4, 35, 1}},
{"vm.anonmin", []_C_int{2, 7}},
{"vm.loadavg", []_C_int{2, 2}},
+ {"vm.malloc_conf", []_C_int{2, 12}},
{"vm.maxslp", []_C_int{2, 10}},
{"vm.nkmempages", []_C_int{2, 6}},
{"vm.psstrings", []_C_int{2, 3}},
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go
index adecd09667d0..d2243cf83f5b 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go
@@ -36,23 +36,29 @@ var sysctlMib = []mibentry{
{"hw.pagesize", []_C_int{6, 7}},
{"hw.perfpolicy", []_C_int{6, 23}},
{"hw.physmem", []_C_int{6, 19}},
+ {"hw.power", []_C_int{6, 26}},
{"hw.product", []_C_int{6, 15}},
{"hw.serialno", []_C_int{6, 17}},
{"hw.setperf", []_C_int{6, 13}},
+ {"hw.smt", []_C_int{6, 24}},
{"hw.usermem", []_C_int{6, 20}},
{"hw.uuid", []_C_int{6, 18}},
{"hw.vendor", []_C_int{6, 14}},
{"hw.version", []_C_int{6, 16}},
+ {"kern.allowdt", []_C_int{1, 65}},
{"kern.allowkmem", []_C_int{1, 52}},
{"kern.argmax", []_C_int{1, 8}},
+ {"kern.audio", []_C_int{1, 84}},
{"kern.boottime", []_C_int{1, 21}},
{"kern.bufcachepercent", []_C_int{1, 72}},
{"kern.ccpu", []_C_int{1, 45}},
{"kern.clockrate", []_C_int{1, 12}},
+ {"kern.consbuf", []_C_int{1, 83}},
+ {"kern.consbufsize", []_C_int{1, 82}},
{"kern.consdev", []_C_int{1, 75}},
{"kern.cp_time", []_C_int{1, 40}},
{"kern.cp_time2", []_C_int{1, 71}},
- {"kern.dnsjackport", []_C_int{1, 13}},
+ {"kern.cpustats", []_C_int{1, 85}},
{"kern.domainname", []_C_int{1, 22}},
{"kern.file", []_C_int{1, 73}},
{"kern.forkstat", []_C_int{1, 42}},
@@ -81,13 +87,13 @@ var sysctlMib = []mibentry{
{"kern.ngroups", []_C_int{1, 18}},
{"kern.nosuidcoredump", []_C_int{1, 32}},
{"kern.nprocs", []_C_int{1, 47}},
- {"kern.nselcoll", []_C_int{1, 43}},
{"kern.nthreads", []_C_int{1, 26}},
{"kern.numvnodes", []_C_int{1, 58}},
{"kern.osrelease", []_C_int{1, 2}},
{"kern.osrevision", []_C_int{1, 3}},
{"kern.ostype", []_C_int{1, 1}},
{"kern.osversion", []_C_int{1, 27}},
+ {"kern.pfstatus", []_C_int{1, 86}},
{"kern.pool_debug", []_C_int{1, 77}},
{"kern.posix1version", []_C_int{1, 17}},
{"kern.proc", []_C_int{1, 66}},
@@ -108,15 +114,19 @@ var sysctlMib = []mibentry{
{"kern.timecounter.hardware", []_C_int{1, 69, 3}},
{"kern.timecounter.tick", []_C_int{1, 69, 1}},
{"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}},
+ {"kern.timeout_stats", []_C_int{1, 87}},
{"kern.tty.tk_cancc", []_C_int{1, 44, 4}},
{"kern.tty.tk_nin", []_C_int{1, 44, 1}},
{"kern.tty.tk_nout", []_C_int{1, 44, 2}},
{"kern.tty.tk_rawcc", []_C_int{1, 44, 3}},
{"kern.tty.ttyinfo", []_C_int{1, 44, 5}},
{"kern.ttycount", []_C_int{1, 57}},
+ {"kern.utc_offset", []_C_int{1, 88}},
{"kern.version", []_C_int{1, 4}},
+ {"kern.video", []_C_int{1, 89}},
{"kern.watchdog.auto", []_C_int{1, 64, 2}},
{"kern.watchdog.period", []_C_int{1, 64, 1}},
+ {"kern.witnesswatch", []_C_int{1, 53}},
{"kern.wxabort", []_C_int{1, 74}},
{"net.bpf.bufsize", []_C_int{4, 31, 1}},
{"net.bpf.maxbufsize", []_C_int{4, 31, 2}},
@@ -176,7 +186,6 @@ var sysctlMib = []mibentry{
{"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}},
{"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}},
{"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}},
- {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}},
{"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}},
{"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}},
{"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}},
@@ -252,12 +261,12 @@ var sysctlMib = []mibentry{
{"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}},
{"net.mpls.mapttl_ip", []_C_int{4, 33, 5}},
{"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}},
- {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}},
{"net.mpls.ttl", []_C_int{4, 33, 2}},
{"net.pflow.stats", []_C_int{4, 34, 1}},
{"net.pipex.enable", []_C_int{4, 35, 1}},
{"vm.anonmin", []_C_int{2, 7}},
{"vm.loadavg", []_C_int{2, 2}},
+ {"vm.malloc_conf", []_C_int{2, 12}},
{"vm.maxslp", []_C_int{2, 10}},
{"vm.nkmempages", []_C_int{2, 6}},
{"vm.psstrings", []_C_int{2, 3}},
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go
index 8ea52a4a1810..82dc51bd8b57 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go
@@ -17,6 +17,7 @@ var sysctlMib = []mibentry{
{"ddb.max_line", []_C_int{9, 3}},
{"ddb.max_width", []_C_int{9, 2}},
{"ddb.panic", []_C_int{9, 5}},
+ {"ddb.profile", []_C_int{9, 9}},
{"ddb.radix", []_C_int{9, 1}},
{"ddb.tab_stop_width", []_C_int{9, 4}},
{"ddb.trigger", []_C_int{9, 8}},
@@ -33,29 +34,37 @@ var sysctlMib = []mibentry{
{"hw.ncpufound", []_C_int{6, 21}},
{"hw.ncpuonline", []_C_int{6, 25}},
{"hw.pagesize", []_C_int{6, 7}},
+ {"hw.perfpolicy", []_C_int{6, 23}},
{"hw.physmem", []_C_int{6, 19}},
+ {"hw.power", []_C_int{6, 26}},
{"hw.product", []_C_int{6, 15}},
{"hw.serialno", []_C_int{6, 17}},
{"hw.setperf", []_C_int{6, 13}},
+ {"hw.smt", []_C_int{6, 24}},
{"hw.usermem", []_C_int{6, 20}},
{"hw.uuid", []_C_int{6, 18}},
{"hw.vendor", []_C_int{6, 14}},
{"hw.version", []_C_int{6, 16}},
- {"kern.arandom", []_C_int{1, 37}},
+ {"kern.allowdt", []_C_int{1, 65}},
+ {"kern.allowkmem", []_C_int{1, 52}},
{"kern.argmax", []_C_int{1, 8}},
+ {"kern.audio", []_C_int{1, 84}},
{"kern.boottime", []_C_int{1, 21}},
{"kern.bufcachepercent", []_C_int{1, 72}},
{"kern.ccpu", []_C_int{1, 45}},
{"kern.clockrate", []_C_int{1, 12}},
+ {"kern.consbuf", []_C_int{1, 83}},
+ {"kern.consbufsize", []_C_int{1, 82}},
{"kern.consdev", []_C_int{1, 75}},
{"kern.cp_time", []_C_int{1, 40}},
{"kern.cp_time2", []_C_int{1, 71}},
- {"kern.cryptodevallowsoft", []_C_int{1, 53}},
+ {"kern.cpustats", []_C_int{1, 85}},
{"kern.domainname", []_C_int{1, 22}},
{"kern.file", []_C_int{1, 73}},
{"kern.forkstat", []_C_int{1, 42}},
{"kern.fscale", []_C_int{1, 46}},
{"kern.fsync", []_C_int{1, 33}},
+ {"kern.global_ptrace", []_C_int{1, 81}},
{"kern.hostid", []_C_int{1, 11}},
{"kern.hostname", []_C_int{1, 10}},
{"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}},
@@ -78,17 +87,16 @@ var sysctlMib = []mibentry{
{"kern.ngroups", []_C_int{1, 18}},
{"kern.nosuidcoredump", []_C_int{1, 32}},
{"kern.nprocs", []_C_int{1, 47}},
- {"kern.nselcoll", []_C_int{1, 43}},
{"kern.nthreads", []_C_int{1, 26}},
{"kern.numvnodes", []_C_int{1, 58}},
{"kern.osrelease", []_C_int{1, 2}},
{"kern.osrevision", []_C_int{1, 3}},
{"kern.ostype", []_C_int{1, 1}},
{"kern.osversion", []_C_int{1, 27}},
+ {"kern.pfstatus", []_C_int{1, 86}},
{"kern.pool_debug", []_C_int{1, 77}},
{"kern.posix1version", []_C_int{1, 17}},
{"kern.proc", []_C_int{1, 66}},
- {"kern.random", []_C_int{1, 31}},
{"kern.rawpartition", []_C_int{1, 24}},
{"kern.saved_ids", []_C_int{1, 20}},
{"kern.securelevel", []_C_int{1, 9}},
@@ -106,21 +114,20 @@ var sysctlMib = []mibentry{
{"kern.timecounter.hardware", []_C_int{1, 69, 3}},
{"kern.timecounter.tick", []_C_int{1, 69, 1}},
{"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}},
- {"kern.tty.maxptys", []_C_int{1, 44, 6}},
- {"kern.tty.nptys", []_C_int{1, 44, 7}},
+ {"kern.timeout_stats", []_C_int{1, 87}},
{"kern.tty.tk_cancc", []_C_int{1, 44, 4}},
{"kern.tty.tk_nin", []_C_int{1, 44, 1}},
{"kern.tty.tk_nout", []_C_int{1, 44, 2}},
{"kern.tty.tk_rawcc", []_C_int{1, 44, 3}},
{"kern.tty.ttyinfo", []_C_int{1, 44, 5}},
{"kern.ttycount", []_C_int{1, 57}},
- {"kern.userasymcrypto", []_C_int{1, 60}},
- {"kern.usercrypto", []_C_int{1, 52}},
- {"kern.usermount", []_C_int{1, 30}},
+ {"kern.utc_offset", []_C_int{1, 88}},
{"kern.version", []_C_int{1, 4}},
- {"kern.vnode", []_C_int{1, 13}},
+ {"kern.video", []_C_int{1, 89}},
{"kern.watchdog.auto", []_C_int{1, 64, 2}},
{"kern.watchdog.period", []_C_int{1, 64, 1}},
+ {"kern.witnesswatch", []_C_int{1, 53}},
+ {"kern.wxabort", []_C_int{1, 74}},
{"net.bpf.bufsize", []_C_int{4, 31, 1}},
{"net.bpf.maxbufsize", []_C_int{4, 31, 2}},
{"net.inet.ah.enable", []_C_int{4, 2, 51, 1}},
@@ -148,7 +155,9 @@ var sysctlMib = []mibentry{
{"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}},
{"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}},
{"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}},
+ {"net.inet.ip.arpdown", []_C_int{4, 2, 0, 40}},
{"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}},
+ {"net.inet.ip.arptimeout", []_C_int{4, 2, 0, 39}},
{"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}},
{"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}},
{"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}},
@@ -157,8 +166,10 @@ var sysctlMib = []mibentry{
{"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}},
{"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}},
{"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}},
+ {"net.inet.ip.mrtmfc", []_C_int{4, 2, 0, 37}},
{"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}},
{"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}},
+ {"net.inet.ip.mrtvif", []_C_int{4, 2, 0, 38}},
{"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}},
{"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}},
{"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}},
@@ -175,9 +186,7 @@ var sysctlMib = []mibentry{
{"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}},
{"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}},
{"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}},
- {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}},
{"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}},
- {"net.inet.pim.stats", []_C_int{4, 2, 103, 1}},
{"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}},
{"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}},
{"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}},
@@ -191,6 +200,7 @@ var sysctlMib = []mibentry{
{"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}},
{"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}},
{"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}},
+ {"net.inet.tcp.rootonly", []_C_int{4, 2, 6, 24}},
{"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}},
{"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}},
{"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}},
@@ -198,9 +208,12 @@ var sysctlMib = []mibentry{
{"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}},
{"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}},
{"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}},
+ {"net.inet.tcp.synhashsize", []_C_int{4, 2, 6, 25}},
+ {"net.inet.tcp.synuselimit", []_C_int{4, 2, 6, 23}},
{"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}},
{"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}},
{"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}},
+ {"net.inet.udp.rootonly", []_C_int{4, 2, 17, 6}},
{"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}},
{"net.inet.udp.stats", []_C_int{4, 2, 17, 5}},
{"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}},
@@ -213,13 +226,8 @@ var sysctlMib = []mibentry{
{"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}},
{"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}},
{"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}},
- {"net.inet6.icmp6.nd6_prune", []_C_int{4, 24, 30, 6}},
{"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}},
- {"net.inet6.icmp6.nd6_useloopback", []_C_int{4, 24, 30, 11}},
- {"net.inet6.icmp6.nodeinfo", []_C_int{4, 24, 30, 13}},
- {"net.inet6.icmp6.rediraccept", []_C_int{4, 24, 30, 2}},
{"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}},
- {"net.inet6.ip6.accept_rtadv", []_C_int{4, 24, 17, 12}},
{"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}},
{"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}},
{"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}},
@@ -232,20 +240,19 @@ var sysctlMib = []mibentry{
{"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}},
{"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}},
{"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}},
- {"net.inet6.ip6.maxifdefrouters", []_C_int{4, 24, 17, 47}},
- {"net.inet6.ip6.maxifprefixes", []_C_int{4, 24, 17, 46}},
{"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}},
+ {"net.inet6.ip6.mrtmfc", []_C_int{4, 24, 17, 53}},
+ {"net.inet6.ip6.mrtmif", []_C_int{4, 24, 17, 52}},
{"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}},
{"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}},
{"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}},
{"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}},
{"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}},
{"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}},
- {"net.inet6.ip6.rr_prune", []_C_int{4, 24, 17, 22}},
+ {"net.inet6.ip6.soiikey", []_C_int{4, 24, 17, 54}},
{"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}},
{"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}},
{"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}},
- {"net.inet6.ip6.v6only", []_C_int{4, 24, 17, 24}},
{"net.key.sadb_dump", []_C_int{4, 30, 1}},
{"net.key.spd_dump", []_C_int{4, 30, 2}},
{"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}},
@@ -254,12 +261,12 @@ var sysctlMib = []mibentry{
{"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}},
{"net.mpls.mapttl_ip", []_C_int{4, 33, 5}},
{"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}},
- {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}},
{"net.mpls.ttl", []_C_int{4, 33, 2}},
{"net.pflow.stats", []_C_int{4, 34, 1}},
{"net.pipex.enable", []_C_int{4, 35, 1}},
{"vm.anonmin", []_C_int{2, 7}},
{"vm.loadavg", []_C_int{2, 2}},
+ {"vm.malloc_conf", []_C_int{2, 12}},
{"vm.maxslp", []_C_int{2, 10}},
{"vm.nkmempages", []_C_int{2, 6}},
{"vm.psstrings", []_C_int{2, 3}},
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go
index 154b57ae3e2a..cbdda1a4ae24 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go
@@ -36,6 +36,7 @@ var sysctlMib = []mibentry{
{"hw.pagesize", []_C_int{6, 7}},
{"hw.perfpolicy", []_C_int{6, 23}},
{"hw.physmem", []_C_int{6, 19}},
+ {"hw.power", []_C_int{6, 26}},
{"hw.product", []_C_int{6, 15}},
{"hw.serialno", []_C_int{6, 17}},
{"hw.setperf", []_C_int{6, 13}},
@@ -44,6 +45,7 @@ var sysctlMib = []mibentry{
{"hw.uuid", []_C_int{6, 18}},
{"hw.vendor", []_C_int{6, 14}},
{"hw.version", []_C_int{6, 16}},
+ {"kern.allowdt", []_C_int{1, 65}},
{"kern.allowkmem", []_C_int{1, 52}},
{"kern.argmax", []_C_int{1, 8}},
{"kern.audio", []_C_int{1, 84}},
@@ -51,6 +53,8 @@ var sysctlMib = []mibentry{
{"kern.bufcachepercent", []_C_int{1, 72}},
{"kern.ccpu", []_C_int{1, 45}},
{"kern.clockrate", []_C_int{1, 12}},
+ {"kern.consbuf", []_C_int{1, 83}},
+ {"kern.consbufsize", []_C_int{1, 82}},
{"kern.consdev", []_C_int{1, 75}},
{"kern.cp_time", []_C_int{1, 40}},
{"kern.cp_time2", []_C_int{1, 71}},
@@ -83,13 +87,13 @@ var sysctlMib = []mibentry{
{"kern.ngroups", []_C_int{1, 18}},
{"kern.nosuidcoredump", []_C_int{1, 32}},
{"kern.nprocs", []_C_int{1, 47}},
- {"kern.nselcoll", []_C_int{1, 43}},
{"kern.nthreads", []_C_int{1, 26}},
{"kern.numvnodes", []_C_int{1, 58}},
{"kern.osrelease", []_C_int{1, 2}},
{"kern.osrevision", []_C_int{1, 3}},
{"kern.ostype", []_C_int{1, 1}},
{"kern.osversion", []_C_int{1, 27}},
+ {"kern.pfstatus", []_C_int{1, 86}},
{"kern.pool_debug", []_C_int{1, 77}},
{"kern.posix1version", []_C_int{1, 17}},
{"kern.proc", []_C_int{1, 66}},
@@ -110,13 +114,16 @@ var sysctlMib = []mibentry{
{"kern.timecounter.hardware", []_C_int{1, 69, 3}},
{"kern.timecounter.tick", []_C_int{1, 69, 1}},
{"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}},
+ {"kern.timeout_stats", []_C_int{1, 87}},
{"kern.tty.tk_cancc", []_C_int{1, 44, 4}},
{"kern.tty.tk_nin", []_C_int{1, 44, 1}},
{"kern.tty.tk_nout", []_C_int{1, 44, 2}},
{"kern.tty.tk_rawcc", []_C_int{1, 44, 3}},
{"kern.tty.ttyinfo", []_C_int{1, 44, 5}},
{"kern.ttycount", []_C_int{1, 57}},
+ {"kern.utc_offset", []_C_int{1, 88}},
{"kern.version", []_C_int{1, 4}},
+ {"kern.video", []_C_int{1, 89}},
{"kern.watchdog.auto", []_C_int{1, 64, 2}},
{"kern.watchdog.period", []_C_int{1, 64, 1}},
{"kern.witnesswatch", []_C_int{1, 53}},
@@ -179,7 +186,6 @@ var sysctlMib = []mibentry{
{"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}},
{"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}},
{"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}},
- {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}},
{"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}},
{"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}},
{"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}},
@@ -255,7 +261,6 @@ var sysctlMib = []mibentry{
{"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}},
{"net.mpls.mapttl_ip", []_C_int{4, 33, 5}},
{"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}},
- {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}},
{"net.mpls.ttl", []_C_int{4, 33, 2}},
{"net.pflow.stats", []_C_int{4, 34, 1}},
{"net.pipex.enable", []_C_int{4, 35, 1}},
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go
index d96bb2ba4db6..f55eae1a8211 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go
@@ -36,6 +36,7 @@ var sysctlMib = []mibentry{
{"hw.pagesize", []_C_int{6, 7}},
{"hw.perfpolicy", []_C_int{6, 23}},
{"hw.physmem", []_C_int{6, 19}},
+ {"hw.power", []_C_int{6, 26}},
{"hw.product", []_C_int{6, 15}},
{"hw.serialno", []_C_int{6, 17}},
{"hw.setperf", []_C_int{6, 13}},
@@ -86,7 +87,6 @@ var sysctlMib = []mibentry{
{"kern.ngroups", []_C_int{1, 18}},
{"kern.nosuidcoredump", []_C_int{1, 32}},
{"kern.nprocs", []_C_int{1, 47}},
- {"kern.nselcoll", []_C_int{1, 43}},
{"kern.nthreads", []_C_int{1, 26}},
{"kern.numvnodes", []_C_int{1, 58}},
{"kern.osrelease", []_C_int{1, 2}},
@@ -123,6 +123,7 @@ var sysctlMib = []mibentry{
{"kern.ttycount", []_C_int{1, 57}},
{"kern.utc_offset", []_C_int{1, 88}},
{"kern.version", []_C_int{1, 4}},
+ {"kern.video", []_C_int{1, 89}},
{"kern.watchdog.auto", []_C_int{1, 64, 2}},
{"kern.watchdog.period", []_C_int{1, 64, 1}},
{"kern.witnesswatch", []_C_int{1, 53}},
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go
index a37f77375636..01c43a01fda7 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go
@@ -6,6 +6,7 @@
package unix
+// Deprecated: Use libc wrappers instead of direct syscalls.
const (
SYS_EXIT = 1 // { void sys_exit(int rval); }
SYS_FORK = 2 // { int sys_fork(void); }
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go
index 2fd2060e617a..9bc4c8f9d889 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go
@@ -491,6 +491,90 @@ type Utsname struct {
Machine [256]byte
}
+const SizeofUvmexp = 0x278
+
+type Uvmexp struct {
+ Pagesize int64
+ Pagemask int64
+ Pageshift int64
+ Npages int64
+ Free int64
+ Active int64
+ Inactive int64
+ Paging int64
+ Wired int64
+ Zeropages int64
+ Reserve_pagedaemon int64
+ Reserve_kernel int64
+ Freemin int64
+ Freetarg int64
+ Inactarg int64
+ Wiredmax int64
+ Nswapdev int64
+ Swpages int64
+ Swpginuse int64
+ Swpgonly int64
+ Nswget int64
+ Unused1 int64
+ Cpuhit int64
+ Cpumiss int64
+ Faults int64
+ Traps int64
+ Intrs int64
+ Swtch int64
+ Softs int64
+ Syscalls int64
+ Pageins int64
+ Swapins int64
+ Swapouts int64
+ Pgswapin int64
+ Pgswapout int64
+ Forks int64
+ Forks_ppwait int64
+ Forks_sharevm int64
+ Pga_zerohit int64
+ Pga_zeromiss int64
+ Zeroaborts int64
+ Fltnoram int64
+ Fltnoanon int64
+ Fltpgwait int64
+ Fltpgrele int64
+ Fltrelck int64
+ Fltrelckok int64
+ Fltanget int64
+ Fltanretry int64
+ Fltamcopy int64
+ Fltnamap int64
+ Fltnomap int64
+ Fltlget int64
+ Fltget int64
+ Flt_anon int64
+ Flt_acow int64
+ Flt_obj int64
+ Flt_prcopy int64
+ Flt_przero int64
+ Pdwoke int64
+ Pdrevs int64
+ Unused4 int64
+ Pdfreed int64
+ Pdscans int64
+ Pdanscan int64
+ Pdobscan int64
+ Pdreact int64
+ Pdbusy int64
+ Pdpageouts int64
+ Pdpending int64
+ Pddeact int64
+ Anonpages int64
+ Filepages int64
+ Execpages int64
+ Colorhit int64
+ Colormiss int64
+ Ncolors int64
+ Bootpages int64
+ Poolpages int64
+}
+
const SizeofClockinfo = 0x14
type Clockinfo struct {
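
The new NetBSD Uvmexp mirrors the kernel's all-int64 uvmexp counters (79 fields x 8 bytes = 0x278, matching SizeofUvmexp). A hedged sketch of reading it; the vm.uvmexp2 node name and the unsafe cast are assumptions about how NetBSD exposes these counters, not part of this change:

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

func main() {
	raw, err := unix.SysctlRaw("vm.uvmexp2") // assumed NetBSD node name
	if err != nil {
		panic(err)
	}
	if len(raw) < unix.SizeofUvmexp {
		panic("short read from vm.uvmexp2")
	}
	uvm := (*unix.Uvmexp)(unsafe.Pointer(&raw[0]))
	fmt.Println("pagesize:", uvm.Pagesize, "free pages:", uvm.Free)
}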
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go
index 6a5a1a8ae556..bb05f655d225 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go
@@ -499,6 +499,90 @@ type Utsname struct {
Machine [256]byte
}
+const SizeofUvmexp = 0x278
+
+type Uvmexp struct {
+ Pagesize int64
+ Pagemask int64
+ Pageshift int64
+ Npages int64
+ Free int64
+ Active int64
+ Inactive int64
+ Paging int64
+ Wired int64
+ Zeropages int64
+ Reserve_pagedaemon int64
+ Reserve_kernel int64
+ Freemin int64
+ Freetarg int64
+ Inactarg int64
+ Wiredmax int64
+ Nswapdev int64
+ Swpages int64
+ Swpginuse int64
+ Swpgonly int64
+ Nswget int64
+ Unused1 int64
+ Cpuhit int64
+ Cpumiss int64
+ Faults int64
+ Traps int64
+ Intrs int64
+ Swtch int64
+ Softs int64
+ Syscalls int64
+ Pageins int64
+ Swapins int64
+ Swapouts int64
+ Pgswapin int64
+ Pgswapout int64
+ Forks int64
+ Forks_ppwait int64
+ Forks_sharevm int64
+ Pga_zerohit int64
+ Pga_zeromiss int64
+ Zeroaborts int64
+ Fltnoram int64
+ Fltnoanon int64
+ Fltpgwait int64
+ Fltpgrele int64
+ Fltrelck int64
+ Fltrelckok int64
+ Fltanget int64
+ Fltanretry int64
+ Fltamcopy int64
+ Fltnamap int64
+ Fltnomap int64
+ Fltlget int64
+ Fltget int64
+ Flt_anon int64
+ Flt_acow int64
+ Flt_obj int64
+ Flt_prcopy int64
+ Flt_przero int64
+ Pdwoke int64
+ Pdrevs int64
+ Unused4 int64
+ Pdfreed int64
+ Pdscans int64
+ Pdanscan int64
+ Pdobscan int64
+ Pdreact int64
+ Pdbusy int64
+ Pdpageouts int64
+ Pdpending int64
+ Pddeact int64
+ Anonpages int64
+ Filepages int64
+ Execpages int64
+ Colorhit int64
+ Colormiss int64
+ Ncolors int64
+ Bootpages int64
+ Poolpages int64
+}
+
const SizeofClockinfo = 0x14
type Clockinfo struct {
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go
index 84cc8d01e656..db40e3a19c66 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go
@@ -496,6 +496,90 @@ type Utsname struct {
Machine [256]byte
}
+const SizeofUvmexp = 0x278
+
+type Uvmexp struct {
+ Pagesize int64
+ Pagemask int64
+ Pageshift int64
+ Npages int64
+ Free int64
+ Active int64
+ Inactive int64
+ Paging int64
+ Wired int64
+ Zeropages int64
+ Reserve_pagedaemon int64
+ Reserve_kernel int64
+ Freemin int64
+ Freetarg int64
+ Inactarg int64
+ Wiredmax int64
+ Nswapdev int64
+ Swpages int64
+ Swpginuse int64
+ Swpgonly int64
+ Nswget int64
+ Unused1 int64
+ Cpuhit int64
+ Cpumiss int64
+ Faults int64
+ Traps int64
+ Intrs int64
+ Swtch int64
+ Softs int64
+ Syscalls int64
+ Pageins int64
+ Swapins int64
+ Swapouts int64
+ Pgswapin int64
+ Pgswapout int64
+ Forks int64
+ Forks_ppwait int64
+ Forks_sharevm int64
+ Pga_zerohit int64
+ Pga_zeromiss int64
+ Zeroaborts int64
+ Fltnoram int64
+ Fltnoanon int64
+ Fltpgwait int64
+ Fltpgrele int64
+ Fltrelck int64
+ Fltrelckok int64
+ Fltanget int64
+ Fltanretry int64
+ Fltamcopy int64
+ Fltnamap int64
+ Fltnomap int64
+ Fltlget int64
+ Fltget int64
+ Flt_anon int64
+ Flt_acow int64
+ Flt_obj int64
+ Flt_prcopy int64
+ Flt_przero int64
+ Pdwoke int64
+ Pdrevs int64
+ Unused4 int64
+ Pdfreed int64
+ Pdscans int64
+ Pdanscan int64
+ Pdobscan int64
+ Pdreact int64
+ Pdbusy int64
+ Pdpageouts int64
+ Pdpending int64
+ Pddeact int64
+ Anonpages int64
+ Filepages int64
+ Execpages int64
+ Colorhit int64
+ Colormiss int64
+ Ncolors int64
+ Bootpages int64
+ Poolpages int64
+}
+
const SizeofClockinfo = 0x14
type Clockinfo struct {
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go
index c844e7096ff5..11121151ccf0 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go
@@ -499,6 +499,90 @@ type Utsname struct {
Machine [256]byte
}
+const SizeofUvmexp = 0x278
+
+type Uvmexp struct {
+ Pagesize int64
+ Pagemask int64
+ Pageshift int64
+ Npages int64
+ Free int64
+ Active int64
+ Inactive int64
+ Paging int64
+ Wired int64
+ Zeropages int64
+ Reserve_pagedaemon int64
+ Reserve_kernel int64
+ Freemin int64
+ Freetarg int64
+ Inactarg int64
+ Wiredmax int64
+ Nswapdev int64
+ Swpages int64
+ Swpginuse int64
+ Swpgonly int64
+ Nswget int64
+ Unused1 int64
+ Cpuhit int64
+ Cpumiss int64
+ Faults int64
+ Traps int64
+ Intrs int64
+ Swtch int64
+ Softs int64
+ Syscalls int64
+ Pageins int64
+ Swapins int64
+ Swapouts int64
+ Pgswapin int64
+ Pgswapout int64
+ Forks int64
+ Forks_ppwait int64
+ Forks_sharevm int64
+ Pga_zerohit int64
+ Pga_zeromiss int64
+ Zeroaborts int64
+ Fltnoram int64
+ Fltnoanon int64
+ Fltpgwait int64
+ Fltpgrele int64
+ Fltrelck int64
+ Fltrelckok int64
+ Fltanget int64
+ Fltanretry int64
+ Fltamcopy int64
+ Fltnamap int64
+ Fltnomap int64
+ Fltlget int64
+ Fltget int64
+ Flt_anon int64
+ Flt_acow int64
+ Flt_obj int64
+ Flt_prcopy int64
+ Flt_przero int64
+ Pdwoke int64
+ Pdrevs int64
+ Unused4 int64
+ Pdfreed int64
+ Pdscans int64
+ Pdanscan int64
+ Pdobscan int64
+ Pdreact int64
+ Pdbusy int64
+ Pdpageouts int64
+ Pdpending int64
+ Pddeact int64
+ Anonpages int64
+ Filepages int64
+ Execpages int64
+ Colorhit int64
+ Colormiss int64
+ Ncolors int64
+ Bootpages int64
+ Poolpages int64
+}
+
const SizeofClockinfo = 0x14
type Clockinfo struct {
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go
index 2ed718ca06a7..26eba23b729f 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go
@@ -58,22 +58,22 @@ type Rlimit struct {
type _Gid_t uint32
type Stat_t struct {
- Mode uint32
- Dev int32
- Ino uint64
- Nlink uint32
- Uid uint32
- Gid uint32
- Rdev int32
- Atim Timespec
- Mtim Timespec
- Ctim Timespec
- Size int64
- Blocks int64
- Blksize uint32
- Flags uint32
- Gen uint32
- X__st_birthtim Timespec
+ Mode uint32
+ Dev int32
+ Ino uint64
+ Nlink uint32
+ Uid uint32
+ Gid uint32
+ Rdev int32
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Size int64
+ Blocks int64
+ Blksize int32
+ Flags uint32
+ Gen uint32
+ _ Timespec
}
type Statfs_t struct {
@@ -98,7 +98,7 @@ type Statfs_t struct {
F_mntonname [90]byte
F_mntfromname [90]byte
F_mntfromspec [90]byte
- Pad_cgo_0 [2]byte
+ _ [2]byte
Mount_info [160]byte
}
@@ -111,13 +111,13 @@ type Flock_t struct {
}
type Dirent struct {
- Fileno uint64
- Off int64
- Reclen uint16
- Type uint8
- Namlen uint8
- X__d_padding [4]uint8
- Name [256]int8
+ Fileno uint64
+ Off int64
+ Reclen uint16
+ Type uint8
+ Namlen uint8
+ _ [4]uint8
+ Name [256]int8
}
type Fsid struct {
@@ -262,8 +262,8 @@ type FdSet struct {
}
const (
- SizeofIfMsghdr = 0xec
- SizeofIfData = 0xd4
+ SizeofIfMsghdr = 0xa0
+ SizeofIfData = 0x88
SizeofIfaMsghdr = 0x18
SizeofIfAnnounceMsghdr = 0x1a
SizeofRtMsghdr = 0x60
@@ -292,7 +292,7 @@ type IfData struct {
Link_state uint8
Mtu uint32
Metric uint32
- Pad uint32
+ Rdomain uint32
Baudrate uint64
Ipackets uint64
Ierrors uint64
@@ -304,10 +304,10 @@ type IfData struct {
Imcasts uint64
Omcasts uint64
Iqdrops uint64
+ Oqdrops uint64
Noproto uint64
Capabilities uint32
Lastchange Timeval
- Mclpool [7]Mclpool
}
type IfaMsghdr struct {
@@ -368,20 +368,12 @@ type RtMetrics struct {
Pad uint32
}
-type Mclpool struct {
- Grown int32
- Alive uint16
- Hwm uint16
- Cwm uint16
- Lwm uint16
-}
-
const (
SizeofBpfVersion = 0x4
SizeofBpfStat = 0x8
SizeofBpfProgram = 0x8
SizeofBpfInsn = 0x8
- SizeofBpfHdr = 0x14
+ SizeofBpfHdr = 0x18
)
type BpfVersion struct {
@@ -407,11 +399,14 @@ type BpfInsn struct {
}
type BpfHdr struct {
- Tstamp BpfTimeval
- Caplen uint32
- Datalen uint32
- Hdrlen uint16
- Pad_cgo_0 [2]byte
+ Tstamp BpfTimeval
+ Caplen uint32
+ Datalen uint32
+ Hdrlen uint16
+ Ifidx uint16
+ Flowid uint16
+ Flags uint8
+ Drops uint8
}
type BpfTimeval struct {
@@ -488,7 +483,7 @@ type Uvmexp struct {
Zeropages int32
Reserve_pagedaemon int32
Reserve_kernel int32
- Anonpages int32
+ Unused01 int32
Vnodepages int32
Vtextpages int32
Freemin int32
@@ -507,8 +502,8 @@ type Uvmexp struct {
Swpgonly int32
Nswget int32
Nanon int32
- Nanonneeded int32
- Nfreeanon int32
+ Unused05 int32
+ Unused06 int32
Faults int32
Traps int32
Intrs int32
@@ -516,8 +511,8 @@ type Uvmexp struct {
Softs int32
Syscalls int32
Pageins int32
- Obsolete_swapins int32
- Obsolete_swapouts int32
+ Unused07 int32
+ Unused08 int32
Pgswapin int32
Pgswapout int32
Forks int32
@@ -525,7 +520,7 @@ type Uvmexp struct {
Forks_sharevm int32
Pga_zerohit int32
Pga_zeromiss int32
- Zeroaborts int32
+ Unused09 int32
Fltnoram int32
Fltnoanon int32
Fltnoamap int32
@@ -557,9 +552,9 @@ type Uvmexp struct {
Pdpageouts int32
Pdpending int32
Pddeact int32
- Pdreanon int32
- Pdrevnode int32
- Pdrevtext int32
+ Unused11 int32
+ Unused12 int32
+ Unused13 int32
Fpswtch int32
Kmapent int32
}
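
With the former pad bytes now carrying Ifidx, Flowid, Flags and Drops (hence SizeofBpfHdr growing from 0x14 to 0x18), per-record metadata is available when walking a BPF read buffer. A sketch under the assumption of 4-byte record alignment (OpenBSD's BPF_ALIGNMENT); opening and configuring /dev/bpf is omitted:

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

// walkRecords iterates the records returned by a single read(2) on /dev/bpf.
func walkRecords(buf []byte) {
	for len(buf) >= unix.SizeofBpfHdr {
		hdr := (*unix.BpfHdr)(unsafe.Pointer(&buf[0]))
		end := uint32(hdr.Hdrlen) + hdr.Caplen
		if end > uint32(len(buf)) {
			break // truncated record
		}
		fmt.Printf("ifidx=%d flowid=%d drops=%d caplen=%d\n",
			hdr.Ifidx, hdr.Flowid, hdr.Drops, hdr.Caplen)
		// Each record is padded out to BPF_ALIGNMENT (4 bytes on OpenBSD).
		next := (end + 3) &^ 3
		if next == 0 || next > uint32(len(buf)) {
			break
		}
		buf = buf[next:]
	}
}

func main() { walkRecords(nil) } // no-op without a real capture buffer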
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go
index b4fb97ebe650..5a5479886989 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go
@@ -73,7 +73,6 @@ type Stat_t struct {
Blksize int32
Flags uint32
Gen uint32
- _ [4]byte
_ Timespec
}
@@ -81,7 +80,6 @@ type Statfs_t struct {
F_flags uint32
F_bsize uint32
F_iosize uint32
- _ [4]byte
F_blocks uint64
F_bfree uint64
F_bavail int64
@@ -200,10 +198,8 @@ type IPv6Mreq struct {
type Msghdr struct {
Name *byte
Namelen uint32
- _ [4]byte
Iov *Iovec
Iovlen uint32
- _ [4]byte
Control *byte
Controllen uint32
Flags int32
@@ -311,7 +307,6 @@ type IfData struct {
Oqdrops uint64
Noproto uint64
Capabilities uint32
- _ [4]byte
Lastchange Timeval
}
@@ -373,14 +368,12 @@ type RtMetrics struct {
Pad uint32
}
-type Mclpool struct{}
-
const (
SizeofBpfVersion = 0x4
SizeofBpfStat = 0x8
SizeofBpfProgram = 0x10
SizeofBpfInsn = 0x8
- SizeofBpfHdr = 0x14
+ SizeofBpfHdr = 0x18
)
type BpfVersion struct {
@@ -395,7 +388,6 @@ type BpfStat struct {
type BpfProgram struct {
Len uint32
- _ [4]byte
Insns *BpfInsn
}
@@ -411,7 +403,10 @@ type BpfHdr struct {
Caplen uint32
Datalen uint32
Hdrlen uint16
- _ [2]byte
+ Ifidx uint16
+ Flowid uint16
+ Flags uint8
+ Drops uint8
}
type BpfTimeval struct {
@@ -488,7 +483,7 @@ type Uvmexp struct {
Zeropages int32
Reserve_pagedaemon int32
Reserve_kernel int32
- Anonpages int32
+ Unused01 int32
Vnodepages int32
Vtextpages int32
Freemin int32
@@ -507,8 +502,8 @@ type Uvmexp struct {
Swpgonly int32
Nswget int32
Nanon int32
- Nanonneeded int32
- Nfreeanon int32
+ Unused05 int32
+ Unused06 int32
Faults int32
Traps int32
Intrs int32
@@ -516,8 +511,8 @@ type Uvmexp struct {
Softs int32
Syscalls int32
Pageins int32
- Obsolete_swapins int32
- Obsolete_swapouts int32
+ Unused07 int32
+ Unused08 int32
Pgswapin int32
Pgswapout int32
Forks int32
@@ -525,7 +520,7 @@ type Uvmexp struct {
Forks_sharevm int32
Pga_zerohit int32
Pga_zeromiss int32
- Zeroaborts int32
+ Unused09 int32
Fltnoram int32
Fltnoanon int32
Fltnoamap int32
@@ -557,9 +552,9 @@ type Uvmexp struct {
Pdpageouts int32
Pdpending int32
Pddeact int32
- Pdreanon int32
- Pdrevnode int32
- Pdrevtext int32
+ Unused11 int32
+ Unused12 int32
+ Unused13 int32
Fpswtch int32
Kmapent int32
}
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go
index 2c4675040ef3..be58c4e1ff8b 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go
@@ -375,14 +375,12 @@ type RtMetrics struct {
Pad uint32
}
-type Mclpool struct{}
-
const (
SizeofBpfVersion = 0x4
SizeofBpfStat = 0x8
SizeofBpfProgram = 0x8
SizeofBpfInsn = 0x8
- SizeofBpfHdr = 0x14
+ SizeofBpfHdr = 0x18
)
type BpfVersion struct {
@@ -412,7 +410,10 @@ type BpfHdr struct {
Caplen uint32
Datalen uint32
Hdrlen uint16
- _ [2]byte
+ Ifidx uint16
+ Flowid uint16
+ Flags uint8
+ Drops uint8
}
type BpfTimeval struct {
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go
index ddee04514708..52338266cb3e 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go
@@ -368,14 +368,12 @@ type RtMetrics struct {
Pad uint32
}
-type Mclpool struct{}
-
const (
SizeofBpfVersion = 0x4
SizeofBpfStat = 0x8
SizeofBpfProgram = 0x10
SizeofBpfInsn = 0x8
- SizeofBpfHdr = 0x14
+ SizeofBpfHdr = 0x18
)
type BpfVersion struct {
@@ -405,7 +403,10 @@ type BpfHdr struct {
Caplen uint32
Datalen uint32
Hdrlen uint16
- _ [2]byte
+ Ifidx uint16
+ Flowid uint16
+ Flags uint8
+ Drops uint8
}
type BpfTimeval struct {
diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go
index eb13d4e8bfc2..605cfdb12b1d 100644
--- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go
+++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go
@@ -368,14 +368,12 @@ type RtMetrics struct {
Pad uint32
}
-type Mclpool struct{}
-
const (
SizeofBpfVersion = 0x4
SizeofBpfStat = 0x8
SizeofBpfProgram = 0x10
SizeofBpfInsn = 0x8
- SizeofBpfHdr = 0x14
+ SizeofBpfHdr = 0x18
)
type BpfVersion struct {
@@ -405,7 +403,10 @@ type BpfHdr struct {
Caplen uint32
Datalen uint32
Hdrlen uint16
- _ [2]byte
+ Ifidx uint16
+ Flowid uint16
+ Flags uint8
+ Drops uint8
}
type BpfTimeval struct {
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go
index 1d7912760c7a..bd0d5a92f30f 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go
@@ -20,8 +20,6 @@ import (
"context"
"fmt"
"math"
- "os"
- "sync"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -313,41 +311,6 @@ func (pl *PodTopologySpread) calPreFilterState(ctx context.Context, pod *v1.Pod)
s.TpPairToMatchNum[tp] += count
}
}
-
- // backward compatibility with k8s < v1.24: also count (for skew evaluation) pods matching
- // the spread label selector yet running on nodes not supported by this pod's node affinities
- matchingPodsCountOnNonAffinityNodes := make(map[topologyPair]int, len(allNodes))
- var matchingPodsCountOnNonAffinityNodesMu sync.Mutex
- pl.parallelizer.Until(ctx, len(allNodes), func(i int) {
- if len(tpCountsByNode[i]) > 0 {
- return // already accounted (nodes matching our pods' nodeaffinity)
- }
- if os.Getenv("SPREADTOPOLOGY_SKEW_FILTERS_NODEAFFINITY") != "disabled" {
- return // we should only set that to "disabled" on k8s < 1.24
- }
- nodeInfo := allNodes[i]
- node := nodeInfo.Node()
- if !nodeLabelsMatchSpreadConstraints(node.Labels, constraints) {
- return // node not having the spreadconstraint key/label
- }
- for _, constraint := range constraints {
- pair := topologyPair{key: constraint.TopologyKey, value: node.Labels[constraint.TopologyKey]}
- if _, ok := s.TpPairToMatchNum[pair]; !ok {
- // ignore spreadconstraint values (eg. zone names) that are not provided
- // by any node running pods that match the spreadconstraint label selector
- // AND that satisfy the pods' nodeaffinity (k8s < 1.24 behaviour).
- continue
- }
- count := countPodsMatchSelector(nodeInfo.Pods, constraint.Selector, pod.Namespace)
- matchingPodsCountOnNonAffinityNodesMu.Lock()
- matchingPodsCountOnNonAffinityNodes[pair] += count
- matchingPodsCountOnNonAffinityNodesMu.Unlock()
- }
- }, pl.Name())
- for pair, count := range matchingPodsCountOnNonAffinityNodes {
- s.TpPairToMatchNum[pair] += count
- }
-
if pl.enableMinDomainsInPodTopologySpread {
s.TpKeyToDomainsNum = make(map[string]int, len(constraints))
for tp := range s.TpPairToMatchNum {
diff --git a/cluster-autoscaler/vendor/k8s.io/utils/net/ipfamily.go b/cluster-autoscaler/vendor/k8s.io/utils/net/ipfamily.go
new file mode 100644
index 000000000000..1a51fa391891
--- /dev/null
+++ b/cluster-autoscaler/vendor/k8s.io/utils/net/ipfamily.go
@@ -0,0 +1,181 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package net
+
+import (
+ "fmt"
+ "net"
+)
+
+// IPFamily refers to a specific family if not empty, i.e. "4" or "6".
+type IPFamily string
+
+// Constants for valid IPFamilys:
+const (
+ IPFamilyUnknown IPFamily = ""
+
+ IPv4 IPFamily = "4"
+ IPv6 IPFamily = "6"
+)
+
+// IsDualStackIPs returns true if:
+// - all elements of ips are valid
+// - at least one IP from each family (v4 and v6) is present
+func IsDualStackIPs(ips []net.IP) (bool, error) {
+ v4Found := false
+ v6Found := false
+ for i, ip := range ips {
+ switch IPFamilyOf(ip) {
+ case IPv4:
+ v4Found = true
+ case IPv6:
+ v6Found = true
+ default:
+ return false, fmt.Errorf("invalid IP[%d]: %v", i, ip)
+ }
+ }
+
+ return (v4Found && v6Found), nil
+}
+
+// IsDualStackIPStrings returns true if:
+// - all elements of ips can be parsed as IPs
+// - at least one IP from each family (v4 and v6) is present
+func IsDualStackIPStrings(ips []string) (bool, error) {
+ parsedIPs := make([]net.IP, 0, len(ips))
+ for i, ip := range ips {
+ parsedIP := ParseIPSloppy(ip)
+ if parsedIP == nil {
+ return false, fmt.Errorf("invalid IP[%d]: %v", i, ip)
+ }
+ parsedIPs = append(parsedIPs, parsedIP)
+ }
+ return IsDualStackIPs(parsedIPs)
+}
+
+// IsDualStackCIDRs returns true if:
+// - all elements of cidrs are non-nil
+// - at least one CIDR from each family (v4 and v6) is present
+func IsDualStackCIDRs(cidrs []*net.IPNet) (bool, error) {
+ v4Found := false
+ v6Found := false
+ for i, cidr := range cidrs {
+ switch IPFamilyOfCIDR(cidr) {
+ case IPv4:
+ v4Found = true
+ case IPv6:
+ v6Found = true
+ default:
+ return false, fmt.Errorf("invalid CIDR[%d]: %v", i, cidr)
+ }
+ }
+
+ return (v4Found && v6Found), nil
+}
+
+// IsDualStackCIDRStrings returns true if:
+// - all elements of cidrs can be parsed as CIDRs
+// - at least one CIDR from each family (v4 and v6) is present
+func IsDualStackCIDRStrings(cidrs []string) (bool, error) {
+ parsedCIDRs, err := ParseCIDRs(cidrs)
+ if err != nil {
+ return false, err
+ }
+ return IsDualStackCIDRs(parsedCIDRs)
+}
+
+// IPFamilyOf returns the IP family of ip, or IPFamilyUnknown if it is invalid.
+func IPFamilyOf(ip net.IP) IPFamily {
+ switch {
+ case ip.To4() != nil:
+ return IPv4
+ case ip.To16() != nil:
+ return IPv6
+ default:
+ return IPFamilyUnknown
+ }
+}
+
+// IPFamilyOfString returns the IP family of ip, or IPFamilyUnknown if ip cannot
+// be parsed as an IP.
+func IPFamilyOfString(ip string) IPFamily {
+ return IPFamilyOf(ParseIPSloppy(ip))
+}
+
+// IPFamilyOfCIDR returns the IP family of cidr.
+func IPFamilyOfCIDR(cidr *net.IPNet) IPFamily {
+ if cidr == nil {
+ return IPFamilyUnknown
+ }
+ return IPFamilyOf(cidr.IP)
+}
+
+// IPFamilyOfCIDRString returns the IP family of cidr.
+func IPFamilyOfCIDRString(cidr string) IPFamily {
+ ip, _, _ := ParseCIDRSloppy(cidr)
+ return IPFamilyOf(ip)
+}
+
+// IsIPv6 returns true if netIP is IPv6 (and false if it is IPv4, nil, or invalid).
+func IsIPv6(netIP net.IP) bool {
+ return IPFamilyOf(netIP) == IPv6
+}
+
+// IsIPv6String returns true if ip contains a single IPv6 address and nothing else. It
+// returns false if ip is an empty string, an IPv4 address, or anything else that is not a
+// single IPv6 address.
+func IsIPv6String(ip string) bool {
+ return IPFamilyOfString(ip) == IPv6
+}
+
+// IsIPv6CIDR returns true if a cidr is a valid IPv6 CIDR. It returns false if cidr is
+// nil or an IPv4 CIDR. Its behavior is not defined if cidr is invalid.
+func IsIPv6CIDR(cidr *net.IPNet) bool {
+ return IPFamilyOfCIDR(cidr) == IPv6
+}
+
+// IsIPv6CIDRString returns true if cidr contains a single IPv6 CIDR and nothing else. It
+// returns false if cidr is an empty string, an IPv4 CIDR, or anything else that is not a
+// single valid IPv6 CIDR.
+func IsIPv6CIDRString(cidr string) bool {
+ return IPFamilyOfCIDRString(cidr) == IPv6
+}
+
+// IsIPv4 returns true if netIP is IPv4 (and false if it is IPv6, nil, or invalid).
+func IsIPv4(netIP net.IP) bool {
+ return IPFamilyOf(netIP) == IPv4
+}
+
+// IsIPv4String returns true if ip contains a single IPv4 address and nothing else. It
+// returns false if ip is an empty string, an IPv6 address, or anything else that is not a
+// single IPv4 address.
+func IsIPv4String(ip string) bool {
+ return IPFamilyOfString(ip) == IPv4
+}
+
+// IsIPv4CIDR returns true if cidr is a valid IPv4 CIDR. It returns false if cidr is nil
+// or an IPv6 CIDR. Its behavior is not defined if cidr is invalid.
+func IsIPv4CIDR(cidr *net.IPNet) bool {
+ return IPFamilyOfCIDR(cidr) == IPv4
+}
+
+// IsIPv4CIDRString returns true if cidr contains a single IPv4 CIDR and nothing else. It
+// returns false if cidr is an empty string, an IPv6 CIDR, or anything else that is not a
+// single valid IPv4 CIDR.
+func IsIPv4CIDRString(cidr string) bool {
+ return IPFamilyOfCIDRString(cidr) == IPv4
+}
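
A brief usage sketch of the consolidated helpers (the addresses are made-up examples):

package main

import (
	"fmt"

	netutils "k8s.io/utils/net"
)

func main() {
	dual, err := netutils.IsDualStackIPStrings([]string{"10.0.0.1", "fd00::1"})
	if err != nil {
		panic(err)
	}
	fmt.Println("dual-stack:", dual) // true: one address from each family

	fmt.Println(netutils.IPFamilyOfString("10.0.0.1"))      // 4
	fmt.Println(netutils.IPFamilyOfCIDRString("fd00::/64")) // 6
	// Invalid input maps to IPFamilyUnknown rather than an error.
	fmt.Println(netutils.IPFamilyOfString("bogus") == netutils.IPFamilyUnknown) // true
}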
diff --git a/cluster-autoscaler/vendor/k8s.io/utils/net/net.go b/cluster-autoscaler/vendor/k8s.io/utils/net/net.go
index b7c08e2e003f..704c1f232aef 100644
--- a/cluster-autoscaler/vendor/k8s.io/utils/net/net.go
+++ b/cluster-autoscaler/vendor/k8s.io/utils/net/net.go
@@ -29,138 +29,16 @@ import (
// order is maintained
func ParseCIDRs(cidrsString []string) ([]*net.IPNet, error) {
cidrs := make([]*net.IPNet, 0, len(cidrsString))
- for _, cidrString := range cidrsString {
+ for i, cidrString := range cidrsString {
_, cidr, err := ParseCIDRSloppy(cidrString)
if err != nil {
- return nil, fmt.Errorf("failed to parse cidr value:%q with error:%v", cidrString, err)
+ return nil, fmt.Errorf("invalid CIDR[%d]: %v (%v)", i, cidr, err)
}
cidrs = append(cidrs, cidr)
}
return cidrs, nil
}
-// IsDualStackIPs returns if a slice of ips is:
-// - all are valid ips
-// - at least one ip from each family (v4 or v6)
-func IsDualStackIPs(ips []net.IP) (bool, error) {
- v4Found := false
- v6Found := false
- for _, ip := range ips {
- if ip == nil {
- return false, fmt.Errorf("ip %v is invalid", ip)
- }
-
- if v4Found && v6Found {
- continue
- }
-
- if IsIPv6(ip) {
- v6Found = true
- continue
- }
-
- v4Found = true
- }
-
- return (v4Found && v6Found), nil
-}
-
-// IsDualStackIPStrings returns if
-// - all are valid ips
-// - at least one ip from each family (v4 or v6)
-func IsDualStackIPStrings(ips []string) (bool, error) {
- parsedIPs := make([]net.IP, 0, len(ips))
- for _, ip := range ips {
- parsedIP := ParseIPSloppy(ip)
- parsedIPs = append(parsedIPs, parsedIP)
- }
- return IsDualStackIPs(parsedIPs)
-}
-
-// IsDualStackCIDRs returns if
-// - all are valid cidrs
-// - at least one cidr from each family (v4 or v6)
-func IsDualStackCIDRs(cidrs []*net.IPNet) (bool, error) {
- v4Found := false
- v6Found := false
- for _, cidr := range cidrs {
- if cidr == nil {
- return false, fmt.Errorf("cidr %v is invalid", cidr)
- }
-
- if v4Found && v6Found {
- continue
- }
-
- if IsIPv6(cidr.IP) {
- v6Found = true
- continue
- }
- v4Found = true
- }
-
- return v4Found && v6Found, nil
-}
-
-// IsDualStackCIDRStrings returns if
-// - all are valid cidrs
-// - at least one cidr from each family (v4 or v6)
-func IsDualStackCIDRStrings(cidrs []string) (bool, error) {
- parsedCIDRs, err := ParseCIDRs(cidrs)
- if err != nil {
- return false, err
- }
- return IsDualStackCIDRs(parsedCIDRs)
-}
-
-// IsIPv6 returns if netIP is IPv6.
-func IsIPv6(netIP net.IP) bool {
- return netIP != nil && netIP.To4() == nil
-}
-
-// IsIPv6String returns if ip is IPv6.
-func IsIPv6String(ip string) bool {
- netIP := ParseIPSloppy(ip)
- return IsIPv6(netIP)
-}
-
-// IsIPv6CIDRString returns if cidr is IPv6.
-// This assumes cidr is a valid CIDR.
-func IsIPv6CIDRString(cidr string) bool {
- ip, _, _ := ParseCIDRSloppy(cidr)
- return IsIPv6(ip)
-}
-
-// IsIPv6CIDR returns if a cidr is ipv6
-func IsIPv6CIDR(cidr *net.IPNet) bool {
- ip := cidr.IP
- return IsIPv6(ip)
-}
-
-// IsIPv4 returns if netIP is IPv4.
-func IsIPv4(netIP net.IP) bool {
- return netIP != nil && netIP.To4() != nil
-}
-
-// IsIPv4String returns if ip is IPv4.
-func IsIPv4String(ip string) bool {
- netIP := ParseIPSloppy(ip)
- return IsIPv4(netIP)
-}
-
-// IsIPv4CIDR returns if a cidr is ipv4
-func IsIPv4CIDR(cidr *net.IPNet) bool {
- ip := cidr.IP
- return IsIPv4(ip)
-}
-
-// IsIPv4CIDRString returns if cidr is IPv4.
-// This assumes cidr is a valid CIDR.
-func IsIPv4CIDRString(cidr string) bool {
- ip, _, _ := ParseCIDRSloppy(cidr)
- return IsIPv4(ip)
-}
-
// ParsePort parses a string representing an IP port. If the string is not a
// valid port number, this returns an error.
func ParsePort(port string, allowZero bool) (int, error) {
diff --git a/cluster-autoscaler/vendor/k8s.io/utils/net/port.go b/cluster-autoscaler/vendor/k8s.io/utils/net/port.go
index 7ac04f0dc983..c6a53fa02b10 100644
--- a/cluster-autoscaler/vendor/k8s.io/utils/net/port.go
+++ b/cluster-autoscaler/vendor/k8s.io/utils/net/port.go
@@ -23,15 +23,6 @@ import (
"strings"
)
-// IPFamily refers to a specific family if not empty, i.e. "4" or "6".
-type IPFamily string
-
-// Constants for valid IPFamilys:
-const (
- IPv4 IPFamily = "4"
- IPv6 = "6"
-)
-
// Protocol is a network protocol support by LocalPort.
type Protocol string
@@ -67,7 +58,7 @@ func NewLocalPort(desc, ip string, ipFamily IPFamily, port int, protocol Protoco
if protocol != TCP && protocol != UDP {
return nil, fmt.Errorf("Unsupported protocol %s", protocol)
}
- if ipFamily != "" && ipFamily != "4" && ipFamily != "6" {
+ if ipFamily != IPFamilyUnknown && ipFamily != IPv4 && ipFamily != IPv6 {
return nil, fmt.Errorf("Invalid IP family %s", ipFamily)
}
if ip != "" {
@@ -75,9 +66,10 @@ func NewLocalPort(desc, ip string, ipFamily IPFamily, port int, protocol Protoco
if parsedIP == nil {
return nil, fmt.Errorf("invalid ip address %s", ip)
}
- asIPv4 := parsedIP.To4()
- if asIPv4 == nil && ipFamily == IPv4 || asIPv4 != nil && ipFamily == IPv6 {
- return nil, fmt.Errorf("ip address and family mismatch %s, %s", ip, ipFamily)
+ if ipFamily != IPFamilyUnknown {
+ if IPFamilyOf(parsedIP) != ipFamily {
+ return nil, fmt.Errorf("ip address and family mismatch %s, %s", ip, ipFamily)
+ }
}
}
return &LocalPort{Description: desc, IP: ip, IPFamily: ipFamily, Port: port, Protocol: protocol}, nil
diff --git a/cluster-autoscaler/vendor/modules.txt b/cluster-autoscaler/vendor/modules.txt
index 0a4a681206d9..a4fcd0aaa898 100644
--- a/cluster-autoscaler/vendor/modules.txt
+++ b/cluster-autoscaler/vendor/modules.txt
@@ -1,11 +1,11 @@
# cloud.google.com/go v0.97.0
## explicit; go 1.11
cloud.google.com/go/compute/metadata
-# github.com/Azure/azure-sdk-for-go v65.0.0+incompatible
+# github.com/Azure/azure-sdk-for-go v67.2.0+incompatible
## explicit
github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute
github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute
-github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute
+github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute
github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry
github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-04-01/containerservice
github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-10-01/containerservice
@@ -15,16 +15,17 @@ github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns
github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources
github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage
github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage
+github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage
github.com/Azure/azure-sdk-for-go/storage
github.com/Azure/azure-sdk-for-go/version
# github.com/Azure/go-autorest v14.2.0+incompatible
## explicit
github.com/Azure/go-autorest
-# github.com/Azure/go-autorest/autorest v0.11.27
+# github.com/Azure/go-autorest/autorest v0.11.28
## explicit; go 1.15
github.com/Azure/go-autorest/autorest
github.com/Azure/go-autorest/autorest/azure
-# github.com/Azure/go-autorest/autorest/adal v0.9.20
+# github.com/Azure/go-autorest/autorest/adal v0.9.21
## explicit; go 1.15
github.com/Azure/go-autorest/autorest/adal
# github.com/Azure/go-autorest/autorest/azure/auth v0.5.8
@@ -546,7 +547,7 @@ github.com/seccomp/libseccomp-golang
# github.com/sirupsen/logrus v1.8.1
## explicit; go 1.13
github.com/sirupsen/logrus
-# github.com/spf13/cobra v1.6.0
+# github.com/spf13/cobra v1.6.1
## explicit; go 1.15
github.com/spf13/cobra
# github.com/spf13/pflag v1.0.5
@@ -555,10 +556,10 @@ github.com/spf13/pflag
# github.com/stoewer/go-strcase v1.2.0
## explicit; go 1.11
github.com/stoewer/go-strcase
-# github.com/stretchr/objx v0.4.0
+# github.com/stretchr/objx v0.5.0
## explicit; go 1.12
github.com/stretchr/objx
-# github.com/stretchr/testify v1.8.0
+# github.com/stretchr/testify v1.8.1
## explicit; go 1.13
github.com/stretchr/testify/assert
github.com/stretchr/testify/mock
@@ -719,7 +720,7 @@ go.uber.org/zap/internal/color
go.uber.org/zap/internal/exit
go.uber.org/zap/zapcore
go.uber.org/zap/zapgrpc
-# golang.org/x/crypto v0.1.0
+# golang.org/x/crypto v0.5.0
## explicit; go 1.17
golang.org/x/crypto/cryptobyte
golang.org/x/crypto/cryptobyte/asn1
@@ -729,7 +730,7 @@ golang.org/x/crypto/nacl/secretbox
golang.org/x/crypto/pkcs12
golang.org/x/crypto/pkcs12/internal/rc2
golang.org/x/crypto/salsa20/salsa
-# golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10
+# golang.org/x/net v0.5.0
## explicit; go 1.17
golang.org/x/net/bpf
golang.org/x/net/context
@@ -758,7 +759,7 @@ golang.org/x/oauth2/jwt
# golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
## explicit
golang.org/x/sync/singleflight
-# golang.org/x/sys v0.3.0
+# golang.org/x/sys v0.4.0
## explicit; go 1.17
golang.org/x/sys/cpu
golang.org/x/sys/internal/unsafeheader
@@ -767,10 +768,10 @@ golang.org/x/sys/unix
golang.org/x/sys/windows
golang.org/x/sys/windows/registry
golang.org/x/sys/windows/svc
-# golang.org/x/term v0.3.0
+# golang.org/x/term v0.4.0
## explicit; go 1.17
golang.org/x/term
-# golang.org/x/text v0.5.0
+# golang.org/x/text v0.6.0
## explicit; go 1.17
golang.org/x/text/encoding
golang.org/x/text/encoding/internal
@@ -1948,7 +1949,7 @@ k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
# k8s.io/mount-utils v0.26.0-alpha.0 => k8s.io/mount-utils v0.26.1-rc.0
## explicit; go 1.19
k8s.io/mount-utils
-# k8s.io/utils v0.0.0-20221107191617-1a15be271d1d
+# k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
## explicit; go 1.18
k8s.io/utils/buffer
k8s.io/utils/clock
@@ -1972,11 +1973,11 @@ k8s.io/utils/trace
## explicit; go 1.17
sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client
sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client
-# sigs.k8s.io/cloud-provider-azure v1.24.2
-## explicit; go 1.18
-sigs.k8s.io/cloud-provider-azure/pkg/auth
+# sigs.k8s.io/cloud-provider-azure v1.26.2
+## explicit; go 1.19
sigs.k8s.io/cloud-provider-azure/pkg/azureclients
sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient
+sigs.k8s.io/cloud-provider-azure/pkg/azureclients/blobclient
sigs.k8s.io/cloud-provider-azure/pkg/azureclients/containerserviceclient
sigs.k8s.io/cloud-provider-azure/pkg/azureclients/containerserviceclient/mockcontainerserviceclient
sigs.k8s.io/cloud-provider-azure/pkg/azureclients/deploymentclient
@@ -2021,8 +2022,10 @@ sigs.k8s.io/cloud-provider-azure/pkg/consts
sigs.k8s.io/cloud-provider-azure/pkg/metrics
sigs.k8s.io/cloud-provider-azure/pkg/nodemanager
sigs.k8s.io/cloud-provider-azure/pkg/provider
+sigs.k8s.io/cloud-provider-azure/pkg/provider/config
sigs.k8s.io/cloud-provider-azure/pkg/provider/virtualmachine
sigs.k8s.io/cloud-provider-azure/pkg/retry
+sigs.k8s.io/cloud-provider-azure/pkg/util/deepcopy
sigs.k8s.io/cloud-provider-azure/pkg/version
# sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2
## explicit; go 1.18
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/azure_armclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/azure_armclient.go
index ce3ae2374fa0..835a63b4544e 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/azure_armclient.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/azure_armclient.go
@@ -18,25 +18,42 @@ package armclient
import (
"context"
+ "crypto/tls"
"fmt"
"html"
+ "net"
"net/http"
+ "net/http/cookiejar"
"net/url"
"strings"
"sync"
"time"
"unicode"
- "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
-
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
-
+ "github.com/Azure/go-autorest/tracing"
"k8s.io/klog/v2"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
"sigs.k8s.io/cloud-provider-azure/pkg/retry"
"sigs.k8s.io/cloud-provider-azure/pkg/version"
)
+// there is one sender per TLS renegotiation type, i.e. count of tls.RenegotiationSupport enums
+
+type defaultSender struct {
+ sender autorest.Sender
+ init *sync.Once
+}
+
+// the default sender is created on demand in sender()
+var defaultSenders defaultSender
+
+func init() {
+ defaultSenders.init = &sync.Once{}
+}
+
var _ Interface = &Client{}
// Client implements ARM client Interface.
@@ -47,10 +64,57 @@ type Client struct {
regionalEndpoint string
}
+func sender() autorest.Sender {
+ // note that we can't init defaultSenders in init() since it will
+ // execute before calling code has had a chance to enable tracing
+ defaultSenders.init.Do(func() {
+ // copied from http.DefaultTransport with a TLS minimum version.
+ transport := &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second, // the same as default transport
+ KeepAlive: 30 * time.Second, // the same as default transport
+ }).DialContext,
+ ForceAttemptHTTP2: true, // always attempt HTTP/2 even though a custom dialer is provided
+ MaxIdleConns: 100, // Zero means no limit, the same as default transport
+ MaxIdleConnsPerHost: 100, // Default is 2, ref:https://cs.opensource.google/go/go/+/go1.18.4:src/net/http/transport.go;l=58
+ IdleConnTimeout: 90 * time.Second, // the same as default transport
+ TLSHandshakeTimeout: 10 * time.Second, // the same as default transport
+ ExpectContinueTimeout: 1 * time.Second, // the same as default transport
+ TLSClientConfig: &tls.Config{
+ MinVersion: tls.VersionTLS12, // force to use TLS 1.2
+ Renegotiation: tls.RenegotiateNever, // the same as default transport https://pkg.go.dev/crypto/tls#RenegotiationSupport
+ },
+ }
+ var roundTripper http.RoundTripper = transport
+ if tracing.IsEnabled() {
+ roundTripper = tracing.NewTransport(transport)
+ }
+ j, _ := cookiejar.New(nil)
+ defaultSenders.sender = &http.Client{Jar: j, Transport: roundTripper}
+
+ // In go-autorest SDK https://github.com/Azure/go-autorest/blob/master/autorest/sender.go#L258-L287,
+ // if ARM returns http.StatusTooManyRequests, the sender doesn't increase the retry attempt count,
+ // hence the Azure clients will keep retrying forever until they get a status code other than 429.
+ // So we explicitly remove http.StatusTooManyRequests from autorest.StatusCodesForRetry.
+ // Refer https://github.com/Azure/go-autorest/issues/398.
+ // TODO(feiskyer): Use autorest.SendDecorator to customize the retry policy when new Azure SDK is available.
+ statusCodesForRetry := make([]int, 0)
+ for _, code := range autorest.StatusCodesForRetry {
+ if code != http.StatusTooManyRequests {
+ statusCodesForRetry = append(statusCodesForRetry, code)
+ }
+ }
+ autorest.StatusCodesForRetry = statusCodesForRetry
+ })
+ return defaultSenders.sender
+}
+
// New creates a ARM client
func New(authorizer autorest.Authorizer, clientConfig azureclients.ClientConfig, baseURI, apiVersion string, sendDecoraters ...autorest.SendDecorator) *Client {
restClient := autorest.NewClientWithUserAgent(clientConfig.UserAgent)
restClient.Authorizer = authorizer
+ restClient.Sender = sender()
if clientConfig.UserAgent == "" {
restClient.UserAgent = GetUserAgent(restClient)
@@ -94,7 +158,6 @@ func New(authorizer autorest.Authorizer, clientConfig azureclients.ClientConfig,
client.client.Sender = autorest.DecorateSender(client.client,
autorest.DoCloseIfError(),
retry.DoExponentialBackoffRetry(backoff),
- DoHackRegionalRetryDecorator(client),
DoDumpRequest(10),
)
@@ -248,7 +311,7 @@ func (c *Client) SendAsync(ctx context.Context, request *http.Request) (*azure.F
return &future, asyncResponse, nil
}
-// GetResource get a resource by resource ID
+// GetResourceWithExpandQuery gets a resource by resource ID with expand
func (c *Client) GetResourceWithExpandQuery(ctx context.Context, resourceID, expand string) (*http.Response, *retry.Error) {
var decorators []autorest.PrepareDecorator
if expand != "" {
@@ -260,6 +323,35 @@ func (c *Client) GetResourceWithExpandQuery(ctx context.Context, resourceID, exp
return c.GetResource(ctx, resourceID, decorators...)
}
+// GetResourceWithExpandAPIVersionQuery gets a resource by resource ID with expand and API version.
+func (c *Client) GetResourceWithExpandAPIVersionQuery(ctx context.Context, resourceID, expand, apiVersion string) (*http.Response, *retry.Error) {
+ decorators := []autorest.PrepareDecorator{
+ withAPIVersion(apiVersion),
+ }
+ if expand != "" {
+ decorators = append(decorators, autorest.WithQueryParameters(map[string]interface{}{
+ "$expand": autorest.Encode("query", expand),
+ }))
+ }
+
+ return c.GetResource(ctx, resourceID, decorators...)
+}
+
+// GetResourceWithQueries gets a resource by resource ID with queries.
+func (c *Client) GetResourceWithQueries(ctx context.Context, resourceID string, queries map[string]interface{}) (*http.Response, *retry.Error) {
+
+ queryParameters := make(map[string]interface{})
+ for queryKey, queryValue := range queries {
+ queryParameters[queryKey] = autorest.Encode("query", queryValue)
+ }
+
+ decorators := []autorest.PrepareDecorator{
+ autorest.WithQueryParameters(queryParameters),
+ }
+
+ return c.GetResource(ctx, resourceID, decorators...)
+}
+
// GetResourceWithDecorators get a resource with decorators by resource ID
func (c *Client) GetResource(ctx context.Context, resourceID string, decorators ...autorest.PrepareDecorator) (*http.Response, *retry.Error) {
getDecorators := append([]autorest.PrepareDecorator{
@@ -271,7 +363,7 @@ func (c *Client) GetResource(ctx context.Context, resourceID string, decorators
return nil, retry.NewError(false, err)
}
- return c.Send(ctx, request)
+ return c.Send(ctx, request, DoHackRegionalRetryForGET(c))
}
// PutResource puts a resource by resource ID
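To make the sender wiring above easier to review, here is a minimal, stdlib-only sketch of the same pattern: one lazily built HTTP client with a TLS 1.2 floor, plus the 429 filtering applied to a retry-code list. The autorest and tracing integration is omitted, and all names here are illustrative rather than the vendored ones.

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net"
	"net/http"
	"net/http/cookiejar"
	"sync"
	"time"
)

// One shared client, built on first use (not in init) so callers can
// configure tracing before the first request is sent.
var (
	sharedClient *http.Client
	sharedOnce   sync.Once
)

// sender lazily builds an http.Client matching http.DefaultTransport,
// except for a TLS 1.2 minimum and a raised per-host idle-connection limit.
func sender() *http.Client {
	sharedOnce.Do(func() {
		transport := &http.Transport{
			Proxy: http.ProxyFromEnvironment,
			DialContext: (&net.Dialer{
				Timeout:   30 * time.Second,
				KeepAlive: 30 * time.Second,
			}).DialContext,
			ForceAttemptHTTP2:     true,
			MaxIdleConns:          100,
			MaxIdleConnsPerHost:   100, // the stdlib default is 2
			IdleConnTimeout:       90 * time.Second,
			TLSHandshakeTimeout:   10 * time.Second,
			ExpectContinueTimeout: 1 * time.Second,
			TLSClientConfig: &tls.Config{
				MinVersion:    tls.VersionTLS12,
				Renegotiation: tls.RenegotiateNever,
			},
		}
		jar, _ := cookiejar.New(nil)
		sharedClient = &http.Client{Jar: jar, Transport: transport}
	})
	return sharedClient
}

// drop429 mirrors removing http.StatusTooManyRequests from
// autorest.StatusCodesForRetry so 429s are not retried without bound.
func drop429(codes []int) []int {
	out := make([]int, 0, len(codes))
	for _, code := range codes {
		if code != http.StatusTooManyRequests {
			out = append(out, code)
		}
	}
	return out
}

func main() {
	fmt.Println(sender() == sender())               // true: one shared client
	fmt.Println(drop429([]int{408, 429, 500, 503})) // [408 500 503]
}
```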
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/interface.go
index ae8cbb4d2cee..e66ba8a18ba9 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/interface.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/interface.go
@@ -80,10 +80,16 @@ type Interface interface {
// HeadResource heads a resource by resource ID
HeadResource(ctx context.Context, resourceID string) (*http.Response, *retry.Error)
- // GetResourceWithExpandQuery get a resource by resource ID
+ // GetResourceWithExpandQuery gets a resource by resource ID with expand
GetResourceWithExpandQuery(ctx context.Context, resourceID, expand string) (*http.Response, *retry.Error)
- //GetResourceWithDecorators get a resource with decorators by resource ID
+ // GetResourceWithExpandAPIVersionQuery gets a resource by resource ID with expand and API version.
+ GetResourceWithExpandAPIVersionQuery(ctx context.Context, resourceID, expand, apiVersion string) (*http.Response, *retry.Error)
+
+ // GetResourceWithQueries gets a resource by resource ID with queries.
+ GetResourceWithQueries(ctx context.Context, resourceID string, queries map[string]interface{}) (*http.Response, *retry.Error)
+
+ // GetResource gets a resource with decorators by resource ID
GetResource(ctx context.Context, resourceID string, decorators ...autorest.PrepareDecorator) (*http.Response, *retry.Error)
// PostResource posts a resource by resource ID
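Functionally, the two new GET variants just attach extra query parameters before the request is prepared. A rough stdlib illustration of the resulting URL shape (the vendored client does this through autorest.PrepareDecorator values; the api-version below is only a placeholder):

```go
package main

import (
	"fmt"
	"net/url"
)

// addQueries approximates what the query decorators do: each entry is
// URL-encoded and appended to the resource URL. Note that url.Values
// percent-encodes keys too, so "$expand" appears as "%24expand".
func addQueries(base string, queries map[string]string) (string, error) {
	u, err := url.Parse(base)
	if err != nil {
		return "", err
	}
	q := u.Query()
	for k, v := range queries {
		q.Set(k, v)
	}
	u.RawQuery = q.Encode()
	return u.String(), nil
}

func main() {
	s, _ := addQueries(
		"https://management.azure.com/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm0",
		map[string]string{"api-version": "2022-03-01", "$expand": "instanceView"},
	)
	fmt.Println(s)
}
```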
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/util.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/util.go
index 582a235f9f38..afefd2cfc729 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/util.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/util.go
@@ -30,6 +30,7 @@ import (
"github.com/Azure/go-autorest/autorest"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/klog/v2"
+
"sigs.k8s.io/cloud-provider-azure/pkg/metrics"
"sigs.k8s.io/cloud-provider-azure/pkg/retry"
)
@@ -103,8 +104,8 @@ func WithMetricsSendDecoratorWrapper(prefix, request, resourceGroup, subscriptio
return nil
}
-// DoExponentialBackoffRetry returns an autorest.SendDecorator which performs retry with customizable backoff policy.
-func DoHackRegionalRetryDecorator(c *Client) autorest.SendDecorator {
+// DoHackRegionalRetryForGET checks if a GET request returns an empty response and retries it against the regional server, or returns the error.
+func DoHackRegionalRetryForGET(c *Client) autorest.SendDecorator {
return func(s autorest.Sender) autorest.Sender {
return autorest.SenderFunc(func(request *http.Request) (*http.Response, error) {
response, rerr := s.Do(request)
@@ -112,30 +113,40 @@ func DoHackRegionalRetryDecorator(c *Client) autorest.SendDecorator {
klog.V(2).Infof("response is empty")
return response, rerr
}
- if rerr == nil || response.StatusCode == http.StatusNotFound || c.regionalEndpoint == "" {
- return response, rerr
- }
- // Hack: retry the regional ARM endpoint in case of ARM traffic split and arm resource group replication is too slow
+
bodyBytes, _ := ioutil.ReadAll(response.Body)
defer func() {
response.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))
}()
bodyString := string(bodyBytes)
- var body map[string]interface{}
- if e := json.Unmarshal(bodyBytes, &body); e != nil {
- klog.Errorf("Send.sendRequest: error in parsing response body string: %s, Skip retrying regional host", e.Error())
- return response, rerr
- }
- klog.V(5).Infof("Send.sendRequest original response: %s", bodyString)
+ trimmed := strings.TrimSpace(bodyString)
+ klog.V(5).Infof("Send.sendRequest got response with ContentLength %d, StatusCode %d and responseBody length %d", response.ContentLength, response.StatusCode, len(trimmed))
- if err, ok := body["error"].(map[string]interface{}); !ok ||
- err["code"] == nil ||
- !strings.EqualFold(err["code"].(string), "ResourceGroupNotFound") {
- klog.V(5).Infof("Send.sendRequest: response body does not contain ResourceGroupNotFound error code. Skip retrying regional host")
- return response, rerr
+ // Hack: retry the regional ARM endpoint in case of ARM traffic split and arm resource group replication is too slow
+ // Empty content and 2xx http status code are returned in this case.
+ // Issue: https://github.com/kubernetes-sigs/cloud-provider-azure/issues/1296
+ // A response with ContentLength -1, StatusCode 200 and an empty body also needs this retry.
+ emptyResp := (response.ContentLength == 0 || trimmed == "" || trimmed == "{}") && response.StatusCode >= 200 && response.StatusCode < 300
+ if !emptyResp {
+ if rerr == nil || response.StatusCode == http.StatusNotFound || c.regionalEndpoint == "" {
+ return response, rerr
+ }
+
+ var body map[string]interface{}
+ if e := json.Unmarshal(bodyBytes, &body); e != nil {
+ klog.Errorf("Send.sendRequest: error in parsing response body string %q: %s, Skip retrying regional host", bodyBytes, e.Error())
+ return response, rerr
+ }
+
+ err, ok := body["error"].(map[string]interface{})
+ if !ok || err["code"] == nil || !strings.EqualFold(err["code"].(string), "ResourceGroupNotFound") {
+ klog.V(5).Infof("Send.sendRequest: response body does not contain ResourceGroupNotFound error code. Skip retrying regional host")
+ return response, rerr
+ }
}
+ // Do regional request
currentHost := request.URL.Host
if request.Host != "" {
currentHost = request.Host
@@ -148,9 +159,10 @@ func DoHackRegionalRetryDecorator(c *Client) autorest.SendDecorator {
request.Host = c.regionalEndpoint
request.URL.Host = c.regionalEndpoint
- klog.V(5).Infof("Send.sendRegionalRequest on ResourceGroupNotFound error. Retrying regional host: %s", html.EscapeString(request.Host))
+ klog.V(6).Infof("Send.sendRegionalRequest on ResourceGroupNotFound error. Retrying regional host: %s", html.EscapeString(request.Host))
regionalResponse, regionalError := s.Do(request)
+
// only use the result if the regional request actually goes through and returns 2xx status code, for two reasons:
// 1. the retry on regional ARM host approach is a hack.
// 2. the concatenated regional uri could be wrong as the rule is not officially declared by ARM.
@@ -160,9 +172,24 @@ func DoHackRegionalRetryDecorator(c *Client) autorest.SendDecorator {
regionalErrStr = regionalError.Error()
}
- klog.V(5).Infof("Send.sendRegionalRequest failed to get response from regional host, error: '%s'. Ignoring the result.", regionalErrStr)
+ klog.V(6).Infof("Send.sendRegionalRequest failed to get response from regional host, error: %q. Ignoring the result.", regionalErrStr)
return response, rerr
}
+
+ // Do the same check on regional response just like the global one
+ bodyBytes, _ = ioutil.ReadAll(regionalResponse.Body)
+ defer func() {
+ regionalResponse.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))
+ }()
+ bodyString = string(bodyBytes)
+ trimmed = strings.TrimSpace(bodyString)
+ emptyResp = (regionalResponse.ContentLength == 0 || trimmed == "" || trimmed == "{}") && regionalResponse.StatusCode >= 200 && regionalResponse.StatusCode < 300
+ if emptyResp {
+ contentLengthErrStr := fmt.Sprintf("empty response with trimmed body %q, ContentLength %d and StatusCode %d", trimmed, regionalResponse.ContentLength, regionalResponse.StatusCode)
+ klog.Error(contentLengthErrStr)
+ return response, fmt.Errorf("%s", contentLengthErrStr)
+ }
+
return regionalResponse, regionalError
})
}
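The heart of the reworked decorator is classifying a response as an "empty 2xx" before deciding whether to retry the regional endpoint. Below is a hedged, stdlib-only sketch of that check, including the body re-buffering that keeps the response readable downstream (io here replaces the deprecated ioutil used in the vendored file):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// isEmpty2xx reports whether resp carries a 2xx status with an
// effectively empty JSON body ("", "{}", or ContentLength 0), the
// signature of the ARM traffic-split case the decorator retries on.
// The body is re-buffered so callers can still read it afterwards.
func isEmpty2xx(resp *http.Response) (bool, error) {
	bodyBytes, err := io.ReadAll(resp.Body)
	if err != nil {
		return false, err
	}
	resp.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
	trimmed := strings.TrimSpace(string(bodyBytes))
	empty := resp.ContentLength == 0 || trimmed == "" || trimmed == "{}"
	return empty && resp.StatusCode >= 200 && resp.StatusCode < 300, nil
}

func main() {
	resp := &http.Response{
		StatusCode:    200,
		ContentLength: -1, // unknown length, as seen in the reported issue
		Body:          io.NopCloser(strings.NewReader("{}")),
	}
	empty, _ := isEmpty2xx(resp)
	fmt.Println(empty) // true
}
```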
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/blobclient/azure_blobclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/blobclient/azure_blobclient.go
new file mode 100644
index 000000000000..517f8bda7657
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/blobclient/azure_blobclient.go
@@ -0,0 +1,270 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package blobclient
+
+import (
+ "context"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "k8s.io/client-go/util/flowcontrol"
+ "k8s.io/klog/v2"
+
+ azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/metrics"
+ "sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+var _ Interface = &Client{}
+
+// Client implements the blobclient interface
+type Client struct {
+ armClient armclient.Interface
+ subscriptionID string
+ cloudName string
+
+ // Rate limiting configures.
+ rateLimiterReader flowcontrol.RateLimiter
+ rateLimiterWriter flowcontrol.RateLimiter
+
+ // ARM throttling configures.
+ RetryAfterReader time.Time
+ RetryAfterWriter time.Time
+
+ // now allows for injecting fake or real now time into code
+ now func() time.Time
+}
+
+// New creates a blobContainersClient
+func New(config *azclients.ClientConfig) *Client {
+ baseURI := config.ResourceManagerEndpoint
+ authorizer := config.Authorizer
+ apiVersion := APIVersion
+
+ if strings.EqualFold(config.CloudName, AzureStackCloudName) && !config.DisableAzureStackCloud {
+ apiVersion = AzureStackCloudAPIVersion
+ }
+
+ klog.V(2).Infof("Azure BlobClient using API version: %s", apiVersion)
+ armClient := armclient.New(authorizer, *config, baseURI, apiVersion)
+ rateLimiterReader, rateLimiterWriter := azclients.NewRateLimiter(config.RateLimitConfig)
+
+ if azclients.RateLimitEnabled(config.RateLimitConfig) {
+ klog.V(2).Infof("Azure BlobClient (read ops) using rate limit config: QPS=%g, bucket=%d",
+ config.RateLimitConfig.CloudProviderRateLimitQPS,
+ config.RateLimitConfig.CloudProviderRateLimitBucket)
+ klog.V(2).Infof("Azure BlobClient (write ops) using rate limit config: QPS=%g, bucket=%d",
+ config.RateLimitConfig.CloudProviderRateLimitQPSWrite,
+ config.RateLimitConfig.CloudProviderRateLimitBucketWrite)
+ }
+
+ client := &Client{
+ armClient: armClient,
+ rateLimiterReader: rateLimiterReader,
+ rateLimiterWriter: rateLimiterWriter,
+ subscriptionID: config.SubscriptionID,
+ cloudName: config.CloudName,
+ now: time.Now,
+ }
+
+ return client
+}
+
+// CreateContainer creates a blob container
+func (c *Client) CreateContainer(ctx context.Context, subsID, resourceGroupName, accountName, containerName string, parameters storage.BlobContainer) *retry.Error {
+ if subsID == "" {
+ subsID = c.subscriptionID
+ }
+
+ mc := metrics.NewMetricContext("blob_container", "create", resourceGroupName, subsID, "")
+
+ // Report errors if the client is rate limited.
+ if !c.rateLimiterWriter.TryAccept() {
+ mc.RateLimitedCount()
+ return retry.GetRateLimitError(true, "CreateBlobContainer")
+ }
+
+ // Report errors if the client is throttled.
+ if c.RetryAfterWriter.After(c.now()) {
+ mc.ThrottledCount()
+ rerr := retry.GetThrottlingError("CreateBlobContainer", "client throttled", c.RetryAfterWriter)
+ return rerr
+ }
+
+ rerr := c.createContainer(ctx, subsID, resourceGroupName, accountName, containerName, parameters)
+ mc.Observe(rerr)
+ if rerr != nil {
+ if rerr.IsThrottled() {
+ // Update RetryAfterWriter so that no more requests would be sent until RetryAfter expires.
+ c.RetryAfterWriter = rerr.RetryAfter
+ }
+
+ return rerr
+ }
+
+ return nil
+}
+
+func (c *Client) createContainer(ctx context.Context, subsID, resourceGroupName, accountName, containerName string, parameters storage.BlobContainer) *retry.Error {
+ // resourceID format: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}"
+ resourceID := armclient.GetChildResourceID(
+ subsID,
+ resourceGroupName,
+ "Microsoft.Storage/storageAccounts",
+ accountName,
+ "blobServices/default/containers",
+ containerName,
+ )
+
+ response, rerr := c.armClient.PutResource(ctx, resourceID, parameters)
+ defer c.armClient.CloseResponse(ctx, response)
+ if rerr != nil {
+ klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "blob_container.put.request", resourceID, rerr.Error())
+ return rerr
+ }
+
+ container := storage.BlobContainer{}
+ err := autorest.Respond(
+ response,
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&container))
+ container.Response = autorest.Response{Response: response}
+
+ return retry.GetError(response, err)
+}
+
+// DeleteContainer deletes a blob container
+func (c *Client) DeleteContainer(ctx context.Context, subsID, resourceGroupName, accountName, containerName string) *retry.Error {
+ if subsID == "" {
+ subsID = c.subscriptionID
+ }
+
+ mc := metrics.NewMetricContext("blob_container", "delete", resourceGroupName, subsID, "")
+
+ // Report errors if the client is rate limited.
+ if !c.rateLimiterWriter.TryAccept() {
+ mc.RateLimitedCount()
+ return retry.GetRateLimitError(true, "BlobContainerDelete")
+ }
+
+ // Report errors if the client is throttled.
+ if c.RetryAfterWriter.After(c.now()) {
+ mc.ThrottledCount()
+ rerr := retry.GetThrottlingError("BlobContainerDelete", "client throttled", c.RetryAfterWriter)
+ return rerr
+ }
+
+ rerr := c.deleteContainer(ctx, subsID, resourceGroupName, accountName, containerName)
+ mc.Observe(rerr)
+ if rerr != nil {
+ if rerr.IsThrottled() {
+ // Update RetryAfterWriter so that no more requests would be sent until RetryAfter expires.
+ c.RetryAfterWriter = rerr.RetryAfter
+ }
+
+ return rerr
+ }
+
+ return nil
+}
+
+func (c *Client) deleteContainer(ctx context.Context, subsID, resourceGroupName, accountName, containerName string) *retry.Error {
+ // resourceID format: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}"
+ resourceID := armclient.GetChildResourceID(
+ subsID,
+ resourceGroupName,
+ "Microsoft.Storage/storageAccounts",
+ accountName,
+ "blobServices/default/containers",
+ containerName,
+ )
+
+ return c.armClient.DeleteResource(ctx, resourceID)
+}
+
+// GetContainer gets a blob container
+func (c *Client) GetContainer(ctx context.Context, subsID, resourceGroupName, accountName, containerName string) (storage.BlobContainer, *retry.Error) {
+ if subsID == "" {
+ subsID = c.subscriptionID
+ }
+
+ mc := metrics.NewMetricContext("blob_container", "get", resourceGroupName, subsID, "")
+
+ // Report errors if the client is rate limited.
+ if !c.rateLimiterReader.TryAccept() {
+ mc.RateLimitedCount()
+ return storage.BlobContainer{}, retry.GetRateLimitError(false, "GetBlobContainer")
+ }
+
+ // Report errors if the client is throttled.
+ if c.RetryAfterReader.After(c.now()) {
+ mc.ThrottledCount()
+ rerr := retry.GetThrottlingError("GetBlobContainer", "client throttled", c.RetryAfterReader)
+ return storage.BlobContainer{}, rerr
+ }
+
+ container, rerr := c.getContainer(ctx, subsID, resourceGroupName, accountName, containerName)
+ mc.Observe(rerr)
+ if rerr != nil {
+ if rerr.IsThrottled() {
+ // Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
+ c.RetryAfterReader = rerr.RetryAfter
+ }
+
+ return container, rerr
+ }
+
+ return container, nil
+}
+
+func (c *Client) getContainer(ctx context.Context, subsID, resourceGroupName, accountName, containerName string) (storage.BlobContainer, *retry.Error) {
+ // resourceID format: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}"
+ resourceID := armclient.GetChildResourceID(
+ subsID,
+ resourceGroupName,
+ "Microsoft.Storage/storageAccounts",
+ accountName,
+ "blobServices/default/containers",
+ containerName,
+ )
+
+ response, rerr := c.armClient.GetResource(ctx, resourceID)
+ defer c.armClient.CloseResponse(ctx, response)
+ if rerr != nil {
+ klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "blob_container.get.request", resourceID, rerr.Error())
+ return storage.BlobContainer{}, rerr
+ }
+
+ container := storage.BlobContainer{}
+
+ err := autorest.Respond(
+ response,
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&container))
+ if err != nil {
+ klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "blob_container.get.request", resourceID, err)
+ return container, retry.GetError(response, err)
+ }
+
+ container.Response = autorest.Response{Response: response}
+ return container, nil
+}
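The blob client reaches containers through the child-resource ID spelled out in its comments. Here is a small stand-in for armclient.GetChildResourceID; the real helper lives in the vendored armclient package, and its exact signature is assumed here from the call sites above:

```go
package main

import "fmt"

// childResourceID assembles the ARM child-resource path used by the
// blob client: .../storageAccounts/{account}/blobServices/default/containers/{container}.
func childResourceID(subID, rg, provider, parent, childType, child string) string {
	return fmt.Sprintf(
		"/subscriptions/%s/resourceGroups/%s/providers/%s/%s/%s/%s",
		subID, rg, provider, parent, childType, child,
	)
}

func main() {
	fmt.Println(childResourceID(
		"0000-sub", "my-rg",
		"Microsoft.Storage/storageAccounts", "myaccount",
		"blobServices/default/containers", "mycontainer",
	))
}
```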
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/blobclient/doc.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/blobclient/doc.go
new file mode 100644
index 000000000000..7eac3acbaaca
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/blobclient/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package blobclient implements the client for blob container.
+package blobclient // import "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/blobclient"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/blobclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/blobclient/interface.go
new file mode 100644
index 000000000000..b8ac9d8d7f21
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/blobclient/interface.go
@@ -0,0 +1,42 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package blobclient
+
+import (
+ "context"
+
+ "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+const (
+ // APIVersion is the API version for storage.
+ APIVersion = "2021-09-01"
+ // AzureStackCloudAPIVersion is the API version for Azure Stack
+ AzureStackCloudAPIVersion = "2019-06-01"
+ // AzureStackCloudName is the cloud name of Azure Stack
+ AzureStackCloudName = "AZURESTACKCLOUD"
+)
+
+// Interface is the client interface for blob containers, interface for test injection.
+// Don't forget to run "hack/update-mock-clients.sh" to regenerate the mock client.
+type Interface interface {
+ CreateContainer(ctx context.Context, subsID, resourceGroupName, accountName, containerName string, parameters storage.BlobContainer) *retry.Error
+ DeleteContainer(ctx context.Context, subsID, resourceGroupName, accountName, containerName string) *retry.Error
+ GetContainer(ctx context.Context, subsID, resourceGroupName, accountName, containerName string) (storage.BlobContainer, *retry.Error)
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/containerserviceclient/azure_containerserviceclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/containerserviceclient/azure_containerserviceclient.go
index c5ded7221de0..a61e784de1c4 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/containerserviceclient/azure_containerserviceclient.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/containerserviceclient/azure_containerserviceclient.go
@@ -112,13 +112,6 @@ func (c *Client) Get(ctx context.Context, resourceGroupName string, managedClust
// getManagedCluster gets a ManagedCluster.
func (c *Client) getManagedCluster(ctx context.Context, resourceGroupName string, managedClusterName string) (containerservice.ManagedCluster, *retry.Error) {
- // telemetryDecorator := armclient.WithMetricsDecoratorWrapper("managed_clusters", "get", resourceGroupName, c.subscriptionID, "", func(mc *metrics.MetricContext) []autorest.SendDecorator {
- // return []autorest.SendDecorator{
- // armclient.NewErrorCounterDecorator(mc),
- // armclient.NewRateLimitDecorater(c.rateLimiterReader, mc),
- // armclient.NewThrottledDecorater(mc),
- // }
- // })
resourceID := armclient.GetResourceID(
c.subscriptionID,
resourceGroupName,
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/containerserviceclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/containerserviceclient/interface.go
index 98ff18d8a15a..477b6b4fb8c8 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/containerserviceclient/interface.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/containerserviceclient/interface.go
@@ -20,6 +20,7 @@ import (
"context"
"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-10-01/containerservice"
+
"sigs.k8s.io/cloud-provider-azure/pkg/retry"
)
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/azure_diskclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/azure_diskclient.go
index 53937f2a393c..4e3053e71468 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/azure_diskclient.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/azure_diskclient.go
@@ -22,7 +22,7 @@ import (
"strings"
"time"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/to"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/interface.go
index ffcd52096d80..a1fd9f1211ac 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/interface.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/interface.go
@@ -19,14 +19,14 @@ package diskclient
import (
"context"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"sigs.k8s.io/cloud-provider-azure/pkg/retry"
)
const (
// APIVersion is the API version for compute.
- APIVersion = "2021-04-01"
+ APIVersion = "2022-03-02"
// AzureStackCloudAPIVersion is the API version for Azure Stack
AzureStackCloudAPIVersion = "2019-03-01"
// AzureStackCloudName is the cloud name of Azure Stack
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient/interface.go
index e3af24459b97..295423864372 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient/interface.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient/interface.go
@@ -25,7 +25,7 @@ import (
context "context"
reflect "reflect"
- compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
gomock "github.com/golang/mock/gomock"
retry "sigs.k8s.io/cloud-provider-azure/pkg/retry"
)
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient/azure_fileclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient/azure_fileclient.go
index 59f4770a2c7d..737b03fbac6d 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient/azure_fileclient.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient/azure_fileclient.go
@@ -20,7 +20,9 @@ import (
"context"
"fmt"
- "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage"
+ "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/to"
"k8s.io/klog/v2"
@@ -35,6 +37,8 @@ type Client struct {
fileServicesClient storage.FileServicesClient
subscriptionID string
+ baseURI string
+ authorizer autorest.Authorizer
}
// ShareOptions contains the fields which are used to create file share.
@@ -51,25 +55,43 @@ type ShareOptions struct {
}
// New creates a azure file client
-func New(config *azclients.ClientConfig) *Client {
- fileSharesClient := storage.NewFileSharesClientWithBaseURI(config.ResourceManagerEndpoint, config.SubscriptionID)
- fileSharesClient.Authorizer = config.Authorizer
-
- fileServicesClient := storage.NewFileServicesClientWithBaseURI(config.ResourceManagerEndpoint, config.SubscriptionID)
- fileServicesClient.Authorizer = config.Authorizer
+func New(config *azclients.ClientConfig) Interface {
+ baseURI := config.ResourceManagerEndpoint
+ authorizer := config.Authorizer
+ fileSharesClient := storage.NewFileSharesClientWithBaseURI(baseURI, config.SubscriptionID)
+ fileSharesClient.Authorizer = authorizer
+
+ fileServicesClient := storage.NewFileServicesClientWithBaseURI(baseURI, config.SubscriptionID)
+ fileServicesClient.Authorizer = authorizer
return &Client{
fileSharesClient: fileSharesClient,
fileServicesClient: fileServicesClient,
subscriptionID: config.SubscriptionID,
+ baseURI: baseURI,
+ authorizer: authorizer,
}
}
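+// WithSubscriptionID returns the receiver when subscriptionID is empty or unchanged, and otherwise builds a new client against the same endpoint and authorizer for the given subscription.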
+func (c *Client) WithSubscriptionID(subscriptionID string) Interface {
+ if subscriptionID == "" || subscriptionID == c.subscriptionID {
+ return c
+ }
+
+ return New(&azclients.ClientConfig{
+ SubscriptionID: subscriptionID,
+ ResourceManagerEndpoint: c.baseURI,
+ Authorizer: c.authorizer,
+ })
+}
+
// CreateFileShare creates a file share
-func (c *Client) CreateFileShare(resourceGroupName, accountName string, shareOptions *ShareOptions) error {
+// expand - optional, used to expand the properties within the share's properties. Valid values are: snapshots.
+// Should be passed as a string with delimiter ','
+func (c *Client) CreateFileShare(ctx context.Context, resourceGroupName, accountName string, shareOptions *ShareOptions, expand string) (storage.FileShare, error) {
mc := metrics.NewMetricContext("file_shares", "create", resourceGroupName, c.subscriptionID, "")
if shareOptions == nil {
- return fmt.Errorf("share options is nil")
+ return storage.FileShare{}, fmt.Errorf("share options is nil")
}
quota := int32(shareOptions.RequestGiB)
fileShareProperties := &storage.FileShareProperties{
@@ -91,7 +113,7 @@ func (c *Client) CreateFileShare(resourceGroupName, accountName string, shareOpt
Name: &shareOptions.Name,
FileShareProperties: fileShareProperties,
}
- _, err := c.fileSharesClient.Create(context.Background(), resourceGroupName, accountName, shareOptions.Name, fileShare, "")
+ result, err := c.fileSharesClient.Create(ctx, resourceGroupName, accountName, shareOptions.Name, fileShare, expand)
var rerr *retry.Error
if err != nil {
rerr = &retry.Error{
@@ -100,14 +122,16 @@ func (c *Client) CreateFileShare(resourceGroupName, accountName string, shareOpt
}
mc.Observe(rerr)
- return err
+ return result, err
}
// DeleteFileShare deletes a file share
-func (c *Client) DeleteFileShare(resourceGroupName, accountName, name string) error {
+// xMsSnapshot - optional, used to delete a snapshot.
+// It is a DateTime value that uniquely identifies the share snapshot. e.g. "2017-05-10T17:52:33.9551861Z"
+func (c *Client) DeleteFileShare(ctx context.Context, resourceGroupName, accountName, name, xMsSnapshot string) error {
mc := metrics.NewMetricContext("file_shares", "delete", resourceGroupName, c.subscriptionID, "")
- _, err := c.fileSharesClient.Delete(context.Background(), resourceGroupName, accountName, name, "")
+ _, err := c.fileSharesClient.Delete(ctx, resourceGroupName, accountName, name, xMsSnapshot, "")
var rerr *retry.Error
if err != nil {
rerr = &retry.Error{
@@ -120,13 +144,13 @@ func (c *Client) DeleteFileShare(resourceGroupName, accountName, name string) er
}
// ResizeFileShare resizes a file share
-func (c *Client) ResizeFileShare(resourceGroupName, accountName, name string, sizeGiB int) error {
+func (c *Client) ResizeFileShare(ctx context.Context, resourceGroupName, accountName, name string, sizeGiB int) error {
mc := metrics.NewMetricContext("file_shares", "resize", resourceGroupName, c.subscriptionID, "")
var rerr *retry.Error
quota := int32(sizeGiB)
- share, err := c.fileSharesClient.Get(context.Background(), resourceGroupName, accountName, name, storage.GetShareExpandStats, "")
+ share, err := c.fileSharesClient.Get(ctx, resourceGroupName, accountName, name, "stats", "")
if err != nil {
rerr = &retry.Error{
RawError: err,
@@ -141,7 +165,7 @@ func (c *Client) ResizeFileShare(resourceGroupName, accountName, name string, si
}
share.FileShareProperties.ShareQuota = &quota
- _, err = c.fileSharesClient.Update(context.Background(), resourceGroupName, accountName, name, share)
+ _, err = c.fileSharesClient.Update(ctx, resourceGroupName, accountName, name, share)
if err != nil {
rerr = &retry.Error{
RawError: err,
@@ -157,10 +181,12 @@ func (c *Client) ResizeFileShare(resourceGroupName, accountName, name string, si
}
// GetFileShare gets a file share
-func (c *Client) GetFileShare(resourceGroupName, accountName, name string) (storage.FileShare, error) {
+// xMsSnapshot - optional, used to retrieve properties of a snapshot.
+// It is a DateTime value that uniquely identifies the share snapshot. e.g. "2017-05-10T17:52:33.9551861Z"
+func (c *Client) GetFileShare(ctx context.Context, resourceGroupName, accountName, name, xMsSnapshot string) (storage.FileShare, error) {
mc := metrics.NewMetricContext("file_shares", "get", resourceGroupName, c.subscriptionID, "")
- result, err := c.fileSharesClient.Get(context.Background(), resourceGroupName, accountName, name, storage.GetShareExpandStats, "")
+ result, err := c.fileSharesClient.Get(ctx, resourceGroupName, accountName, name, "stats", xMsSnapshot)
var rerr *retry.Error
if err != nil {
rerr = &retry.Error{
@@ -172,12 +198,46 @@ func (c *Client) GetFileShare(resourceGroupName, accountName, name string) (stor
return result, err
}
+// ListFileShare gets a file share list
+// expand - optional, used to expand the properties within the share's properties. Valid values are: deleted,
+// snapshots. Should be passed as a string with delimiter ','
+func (c *Client) ListFileShare(ctx context.Context, resourceGroupName, accountName, filter, expand string) ([]storage.FileShareItem, error) {
+ mc := metrics.NewMetricContext("file_shares", "list", resourceGroupName, c.subscriptionID, "")
+
+ page, err := c.fileSharesClient.List(ctx, resourceGroupName, accountName, "", filter, expand)
+ var rerr *retry.Error
+ if err != nil {
+ rerr = &retry.Error{
+ RawError: err,
+ }
+ }
+ mc.Observe(rerr)
+
+ result := make([]storage.FileShareItem, 0)
+
+ for {
+ result = append(result, page.Values()...)
+
+ // Abort the loop when there's no nextLink in the response.
+ if to.String(page.Response().NextLink) == "" {
+ break
+ }
+
+ if err = page.NextWithContext(ctx); err != nil {
+ klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "fileshare.list.next", resourceGroupName, err)
+ mc.Observe(retry.GetError(page.Response().Response.Response, err))
+ }
+ }
+
+ return result, err
+}
+
// GetServiceProperties get service properties
-func (c *Client) GetServiceProperties(resourceGroupName, accountName string) (storage.FileServiceProperties, error) {
- return c.fileServicesClient.GetServiceProperties(context.Background(), resourceGroupName, accountName)
+func (c *Client) GetServiceProperties(ctx context.Context, resourceGroupName, accountName string) (storage.FileServiceProperties, error) {
+ return c.fileServicesClient.GetServiceProperties(ctx, resourceGroupName, accountName)
}
// SetServiceProperties set service properties
-func (c *Client) SetServiceProperties(resourceGroupName, accountName string, parameters storage.FileServiceProperties) (storage.FileServiceProperties, error) {
- return c.fileServicesClient.SetServiceProperties(context.Background(), resourceGroupName, accountName, parameters)
+func (c *Client) SetServiceProperties(ctx context.Context, resourceGroupName, accountName string, parameters storage.FileServiceProperties) (storage.FileServiceProperties, error) {
+ return c.fileServicesClient.SetServiceProperties(ctx, resourceGroupName, accountName, parameters)
}
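ListFileShare follows the usual autorest pager loop: collect the current page, stop when NextLink is empty, otherwise advance. A generic sketch of the same control flow, with a hypothetical page type standing in for storage.FileShareItemPage:

```go
package main

import (
	"context"
	"fmt"
)

// page is a stand-in for an autorest result page: it exposes the
// current values, the next link, and a way to advance.
type page struct {
	values   []string
	nextLink string
	next     func(ctx context.Context) (*page, error)
}

// collectAll mirrors the loop in ListFileShare: append the current
// page's values, stop when there is no nextLink, otherwise advance.
func collectAll(ctx context.Context, p *page) ([]string, error) {
	result := make([]string, 0)
	for {
		result = append(result, p.values...)
		if p.nextLink == "" {
			return result, nil
		}
		np, err := p.next(ctx)
		if err != nil {
			return result, err
		}
		p = np
	}
}

func main() {
	second := &page{values: []string{"share-c"}}
	first := &page{
		values:   []string{"share-a", "share-b"},
		nextLink: "https://example.invalid/page2",
		next:     func(context.Context) (*page, error) { return second, nil },
	}
	all, _ := collectAll(context.Background(), first)
	fmt.Println(all) // [share-a share-b share-c]
}
```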
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient/interface.go
index e1247ae6b6db..75dbea9c0e9f 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient/interface.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient/interface.go
@@ -17,16 +17,20 @@ limitations under the License.
package fileclient
import (
- "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage"
+ "context"
+
+ "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
)
// Interface is the client interface for creating file shares, interface for test injection.
// Don't forget to run "hack/update-mock-clients.sh" command to generate the mock client.
type Interface interface {
- CreateFileShare(resourceGroupName, accountName string, shareOptions *ShareOptions) error
- DeleteFileShare(resourceGroupName, accountName, name string) error
- ResizeFileShare(resourceGroupName, accountName, name string, sizeGiB int) error
- GetFileShare(resourceGroupName, accountName, name string) (storage.FileShare, error)
- GetServiceProperties(resourceGroupName, accountName string) (storage.FileServiceProperties, error)
- SetServiceProperties(resourceGroupName, accountName string, parameters storage.FileServiceProperties) (storage.FileServiceProperties, error)
+ CreateFileShare(ctx context.Context, resourceGroupName, accountName string, shareOptions *ShareOptions, expand string) (storage.FileShare, error)
+ DeleteFileShare(ctx context.Context, resourceGroupName, accountName, name, xMsSnapshot string) error
+ ResizeFileShare(ctx context.Context, resourceGroupName, accountName, name string, sizeGiB int) error
+ GetFileShare(ctx context.Context, resourceGroupName, accountName, name, xMsSnapshot string) (storage.FileShare, error)
+ ListFileShare(ctx context.Context, resourceGroupName, accountName, filter, expand string) ([]storage.FileShareItem, error)
+ GetServiceProperties(ctx context.Context, resourceGroupName, accountName string) (storage.FileServiceProperties, error)
+ SetServiceProperties(ctx context.Context, resourceGroupName, accountName string, parameters storage.FileServiceProperties) (storage.FileServiceProperties, error)
+ WithSubscriptionID(subscriptionID string) Interface
}
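The visible theme of this interface change is that every method now accepts a context instead of hardcoding context.Background(). A small sketch of what that buys callers, with slowARMCall as a hypothetical stand-in for any of the fileclient methods:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// slowARMCall is a stand-in for a fileclient method that now honors ctx,
// so callers can bound the ARM round trip with a deadline.
func slowARMCall(ctx context.Context) error {
	select {
	case <-time.After(2 * time.Second):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	err := slowARMCall(ctx)
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
}
```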
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/interfaceclient/azure_interfaceclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/interfaceclient/azure_interfaceclient.go
index eed60995091a..11a3b98f2f3a 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/interfaceclient/azure_interfaceclient.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/interfaceclient/azure_interfaceclient.go
@@ -55,6 +55,8 @@ type Client struct {
// ARM throttling configures.
RetryAfterReader time.Time
RetryAfterWriter time.Time
+
+ computeAPIVersion string
}
// New creates a new network interface client with ratelimiting.
@@ -77,6 +79,11 @@ func New(config *azclients.ClientConfig) *Client {
config.RateLimitConfig.CloudProviderRateLimitBucketWrite)
}
+ computeAPIVersion := ComputeAPIVersion
+ if strings.EqualFold(config.CloudName, AzureStackCloudName) && !config.DisableAzureStackCloud {
+ computeAPIVersion = AzureStackComputeAPIVersion
+ }
+
client := &Client{
armClient: armClient,
rateLimiterReader: rateLimiterReader,
@@ -84,6 +91,7 @@ func New(config *azclients.ClientConfig) *Client {
subscriptionID: config.SubscriptionID,
cloudName: config.CloudName,
disableAzureStackCloud: config.DisableAzureStackCloud,
+ computeAPIVersion: computeAPIVersion,
}
return client
@@ -192,20 +200,8 @@ func (c *Client) getVMSSNetworkInterface(ctx context.Context, resourceGroupName
)
result := network.Interface{}
- computeAPIVersion := ComputeAPIVersion
- if strings.EqualFold(c.cloudName, AzureStackCloudName) && !c.disableAzureStackCloud {
- computeAPIVersion = AzureStackComputeAPIVersion
- }
- queryParameters := map[string]interface{}{
- "api-version": computeAPIVersion,
- }
- if len(expand) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
- decorators := []autorest.PrepareDecorator{
- autorest.WithQueryParameters(queryParameters),
- }
- response, rerr := c.armClient.GetResource(ctx, resourceID, decorators...)
+
+ response, rerr := c.armClient.GetResourceWithExpandAPIVersionQuery(ctx, resourceID, expand, c.computeAPIVersion)
defer c.armClient.CloseResponse(ctx, response)
if rerr != nil {
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmssnic.get.request", resourceID, rerr.Error())
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatelinkserviceclient/azure_privatelinkserviceclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatelinkserviceclient/azure_privatelinkserviceclient.go
index 4477acf345a6..bede7e41d26d 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatelinkserviceclient/azure_privatelinkserviceclient.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatelinkserviceclient/azure_privatelinkserviceclient.go
@@ -214,7 +214,7 @@ func (c *Client) getPLS(ctx context.Context, resourceGroupName string, privateLi
return result, nil
}
-/// List gets a list of PrivateLinkServices in the resource group.
+// List gets a list of PrivateLinkServices in the resource group.
func (c *Client) List(ctx context.Context, resourceGroupName string) ([]network.PrivateLinkService, *retry.Error) {
mc := metrics.NewMetricContext("private_link_services", "list", resourceGroupName, c.subscriptionID, "")
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/azure_publicipclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/azure_publicipclient.go
index 5aef7b970166..3f76b8425e4d 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/azure_publicipclient.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/azure_publicipclient.go
@@ -55,6 +55,8 @@ type Client struct {
// ARM throttling configures.
RetryAfterReader time.Time
RetryAfterWriter time.Time
+
+ computeAPIVersion string
}
// New creates a new PublicIPAddress client with ratelimiting.
@@ -77,6 +79,11 @@ func New(config *azclients.ClientConfig) *Client {
config.RateLimitConfig.CloudProviderRateLimitBucketWrite)
}
+ computeAPIVersion := ComputeAPIVersion
+ if strings.EqualFold(config.CloudName, AzureStackCloudName) && !config.DisableAzureStackCloud {
+ computeAPIVersion = AzureStackComputeAPIVersion
+ }
+
client := &Client{
armClient: armClient,
rateLimiterReader: rateLimiterReader,
@@ -84,6 +91,7 @@ func New(config *azclients.ClientConfig) *Client {
subscriptionID: config.SubscriptionID,
cloudName: config.CloudName,
disableAzureStackCloud: config.DisableAzureStackCloud,
+ computeAPIVersion: computeAPIVersion,
}
return client
@@ -194,20 +202,7 @@ func (c *Client) getVMSSPublicIPAddress(ctx context.Context, resourceGroupName s
)
result := network.PublicIPAddress{}
- computeAPIVersion := ComputeAPIVersion
- if strings.EqualFold(c.cloudName, AzureStackCloudName) && !c.disableAzureStackCloud {
- computeAPIVersion = AzureStackComputeAPIVersion
- }
- queryParameters := map[string]interface{}{
- "api-version": computeAPIVersion,
- }
- if len(expand) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
- decorators := []autorest.PrepareDecorator{
- autorest.WithQueryParameters(queryParameters),
- }
- response, rerr := c.armClient.GetResource(ctx, resourceID, decorators...)
+ response, rerr := c.armClient.GetResourceWithExpandAPIVersionQuery(ctx, resourceID, expand, c.computeAPIVersion)
defer c.armClient.CloseResponse(ctx, response)
if rerr != nil {
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmsspublicip.get.request", resourceID, rerr.Error())
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/azure_snapshotclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/azure_snapshotclient.go
index 54ff72ee394a..b67d6ca1d725 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/azure_snapshotclient.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/azure_snapshotclient.go
@@ -22,7 +22,7 @@ import (
"strings"
"time"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/to"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/interface.go
index 731db0d291c5..cfd1236d76e8 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/interface.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/interface.go
@@ -19,14 +19,14 @@ package snapshotclient
import (
"context"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"sigs.k8s.io/cloud-provider-azure/pkg/retry"
)
const (
// APIVersion is the API version for compute.
- APIVersion = "2020-12-01"
+ APIVersion = "2022-03-02"
// AzureStackCloudAPIVersion is the API version for Azure Stack
AzureStackCloudAPIVersion = "2019-03-01"
// AzureStackCloudName is the cloud name of Azure Stack
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/mocksnapshotclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/mocksnapshotclient/interface.go
index 6c9809abf1d5..b72f36ef2a7a 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/mocksnapshotclient/interface.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/mocksnapshotclient/interface.go
@@ -25,7 +25,7 @@ import (
context "context"
reflect "reflect"
- compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
gomock "github.com/golang/mock/gomock"
retry "sigs.k8s.io/cloud-provider-azure/pkg/retry"
)
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/azure_storageaccountclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/azure_storageaccountclient.go
index 4acb3e2b953d..35e0bb6d185b 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/azure_storageaccountclient.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/azure_storageaccountclient.go
@@ -22,7 +22,7 @@ import (
"strings"
"time"
- "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage"
+ "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/to"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/interface.go
index bce755c39df5..a693c7cb0c40 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/interface.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/interface.go
@@ -19,7 +19,7 @@ package storageaccountclient
import (
"context"
- "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage"
+ "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
"sigs.k8s.io/cloud-provider-azure/pkg/retry"
)
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/mockstorageaccountclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/mockstorageaccountclient/interface.go
index 2bc75a0ab43d..13c0990569b2 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/mockstorageaccountclient/interface.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/mockstorageaccountclient/interface.go
@@ -25,7 +25,7 @@ import (
context "context"
reflect "reflect"
- storage "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage"
+ storage "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
gomock "github.com/golang/mock/gomock"
retry "sigs.k8s.io/cloud-provider-azure/pkg/retry"
)
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient/azure_virtualnetworklinksclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient/azure_virtualnetworklinksclient.go
index cb513c18e708..8d2ab07de1b6 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient/azure_virtualnetworklinksclient.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient/azure_virtualnetworklinksclient.go
@@ -27,6 +27,7 @@ import (
"github.com/Azure/go-autorest/autorest/azure"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/klog/v2"
+
azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient"
"sigs.k8s.io/cloud-provider-azure/pkg/metrics"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient/interface.go
index ff8c6c2df821..5087bf601092 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient/interface.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient/interface.go
@@ -20,6 +20,7 @@ import (
"context"
"github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns"
+
"sigs.k8s.io/cloud-provider-azure/pkg/retry"
)
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmasclient/azure_vmasclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmasclient/azure_vmasclient.go
index 59372fb251eb..9e2ab7459143 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmasclient/azure_vmasclient.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmasclient/azure_vmasclient.go
@@ -22,7 +22,7 @@ import (
"strings"
"time"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/to"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmasclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmasclient/interface.go
index 5f0a1c552ed8..4e303f16bb1b 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmasclient/interface.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmasclient/interface.go
@@ -19,13 +19,14 @@ package vmasclient
import (
"context"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
+
"sigs.k8s.io/cloud-provider-azure/pkg/retry"
)
const (
// APIVersion is the API version for VMAS.
- APIVersion = "2020-12-01"
+ APIVersion = "2022-03-01"
// AzureStackCloudAPIVersion is the API version for Azure Stack
AzureStackCloudAPIVersion = "2019-07-01"
// AzureStackCloudName is the cloud name of Azure Stack
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/azure_vmclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/azure_vmclient.go
index f5d1a253bcaa..c3357964e31d 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/azure_vmclient.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/azure_vmclient.go
@@ -22,7 +22,7 @@ import (
"strings"
"time"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/to"
@@ -217,6 +217,112 @@ func (c *Client) listVM(ctx context.Context, resourceGroupName string) ([]comput
return result, nil
}
+// ListVmssFlexVMsWithoutInstanceView gets a list of VirtualMachines in the VMSS Flex without InstanceView.
+func (c *Client) ListVmssFlexVMsWithoutInstanceView(ctx context.Context, vmssFlexID string) ([]compute.VirtualMachine, *retry.Error) {
+ mc := metrics.NewMetricContext("vm", "list", "", c.subscriptionID, "")
+
+ // Report errors if the client is rate limited.
+ if !c.rateLimiterReader.TryAccept() {
+ mc.RateLimitedCount()
+ return nil, retry.GetRateLimitError(false, "VMList")
+ }
+
+ // Report errors if the client is throttled.
+ if c.RetryAfterReader.After(time.Now()) {
+ mc.ThrottledCount()
+ rerr := retry.GetThrottlingError("VMList", "client throttled", c.RetryAfterReader)
+ return nil, rerr
+ }
+
+ result, rerr := c.listVmssFlexVMs(ctx, vmssFlexID, false)
+ mc.Observe(rerr)
+ if rerr != nil {
+ if rerr.IsThrottled() {
+ // Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
+ c.RetryAfterReader = rerr.RetryAfter
+ }
+
+ return result, rerr
+ }
+
+ return result, nil
+}
+
+// ListVmssFlexVMsWithOnlyInstanceView gets a list of VirtualMachines in the VMSS Flex with only InstanceView.
+func (c *Client) ListVmssFlexVMsWithOnlyInstanceView(ctx context.Context, vmssFlexID string) ([]compute.VirtualMachine, *retry.Error) {
+ mc := metrics.NewMetricContext("vm", "list", "", c.subscriptionID, "")
+
+ // Report errors if the client is rate limited.
+ if !c.rateLimiterReader.TryAccept() {
+ mc.RateLimitedCount()
+ return nil, retry.GetRateLimitError(false, "VMList")
+ }
+
+ // Report errors if the client is throttled.
+ if c.RetryAfterReader.After(time.Now()) {
+ mc.ThrottledCount()
+ rerr := retry.GetThrottlingError("VMList", "client throttled", c.RetryAfterReader)
+ return nil, rerr
+ }
+
+ result, rerr := c.listVmssFlexVMs(ctx, vmssFlexID, true)
+ mc.Observe(rerr)
+ if rerr != nil {
+ if rerr.IsThrottled() {
+ // Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
+ c.RetryAfterReader = rerr.RetryAfter
+ }
+
+ return result, rerr
+ }
+
+ return result, nil
+}
+
+// listVmssFlexVMs gets a list of VirtualMachines in the VMSS Flex.
+func (c *Client) listVmssFlexVMs(ctx context.Context, vmssFlexID string, statusOnly bool) ([]compute.VirtualMachine, *retry.Error) {
+ resourceID := armclient.GetProviderResourceID(c.subscriptionID, vmResourceType)
+
+ result := make([]compute.VirtualMachine, 0)
+ page := &VirtualMachineListResultPage{}
+ page.fn = c.listNextResults
+
+ queries := make(map[string]interface{})
+ queries["$filter"] = "'virtualMachineScaleSet/id' eq '" + vmssFlexID + "'"
+ if statusOnly {
+ queries["statusOnly"] = true
+ }
+ resp, rerr := c.armClient.GetResourceWithQueries(ctx, resourceID, queries)
+ defer c.armClient.CloseResponse(ctx, resp)
+ if rerr != nil {
+ klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vm.list.request", resourceID, rerr.Error())
+ return result, rerr
+ }
+
+ var err error
+ page.vmlr, err = c.listResponder(resp)
+ if err != nil {
+ klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vm.list.respond", resourceID, err)
+ return result, retry.GetError(resp, err)
+ }
+
+ for {
+ result = append(result, page.Values()...)
+
+ // Abort the loop when there's no nextLink in the response.
+ if to.String(page.Response().NextLink) == "" {
+ break
+ }
+
+ if err = page.NextWithContext(ctx); err != nil {
+ klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vm.list.next", resourceID, err)
+ return result, retry.GetError(page.Response().Response.Response, err)
+ }
+ }
+
+ return result, nil
+}
+
// Update updates a VirtualMachine.
func (c *Client) Update(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachineUpdate, source string) *retry.Error {
mc := metrics.NewMetricContext("vm", "update", resourceGroupName, c.subscriptionID, source)
@@ -291,6 +397,7 @@ func (c *Client) WaitForUpdateResult(ctx context.Context, future *azure.Future,
mc := metrics.NewMetricContext("vm", "wait_for_update_result", resourceGroupName, c.subscriptionID, source)
response, err := c.armClient.WaitForAsyncOperationResult(ctx, future, "VMWaitForUpdateResult")
mc.Observe(retry.NewErrorOrNil(false, err))
+ defer c.armClient.CloseResponse(ctx, response)
if err != nil {
if response != nil {
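Reviewer note: a hedged usage sketch of the new VMSS Flex list call, assuming an initialized client satisfying vmclient.Interface; listFlexVMs is an illustrative helper, not part of this patch, and the vmssFlexID shape follows the $filter clause in listVmssFlexVMs above (imports of context, klog, to, and vmclient elided).

    // listFlexVMs is an illustrative sketch, not part of the patch.
    func listFlexVMs(ctx context.Context, c vmclient.Interface, vmssFlexID string) error {
        // e.g. vmssFlexID = "/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Compute/virtualMachineScaleSets/<name>"
        vms, rerr := c.ListVmssFlexVMsWithoutInstanceView(ctx, vmssFlexID)
        if rerr != nil {
            return rerr.Error() // *retry.Error wraps the underlying error
        }
        for _, vm := range vms {
            klog.V(4).Infof("VMSS Flex VM: %s", to.String(vm.Name))
        }
        return nil
    }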
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/interface.go
index c521c898c23b..5c41811df258 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/interface.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/interface.go
@@ -19,7 +19,7 @@ package vmclient
import (
"context"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/go-autorest/autorest/azure"
"sigs.k8s.io/cloud-provider-azure/pkg/retry"
@@ -27,7 +27,7 @@ import (
const (
// APIVersion is the API version for VirtualMachine.
- APIVersion = "2020-12-01"
+ APIVersion = "2022-03-01"
// AzureStackCloudAPIVersion is the API version for Azure Stack
AzureStackCloudAPIVersion = "2017-12-01"
// AzureStackCloudName is the cloud name of Azure Stack
@@ -43,6 +43,12 @@ type Interface interface {
// List gets a list of VirtualMachines in the resourceGroupName.
List(ctx context.Context, resourceGroupName string) ([]compute.VirtualMachine, *retry.Error)
+ // ListVmssFlexVMsWithoutInstanceView gets a list of VirtualMachines in the VMSS Flex without InstanceView.
+ ListVmssFlexVMsWithoutInstanceView(ctx context.Context, vmssFlexID string) ([]compute.VirtualMachine, *retry.Error)
+
+ // ListVmssFlexVMsWithOnlyInstanceView gets a list of VirtualMachines in the VMSS Flex with only InstanceView.
+ ListVmssFlexVMsWithOnlyInstanceView(ctx context.Context, vmssFlexID string) ([]compute.VirtualMachine, *retry.Error)
+
// CreateOrUpdate creates or updates a VirtualMachine.
CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine, source string) *retry.Error
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient/interface.go
index b204a97bebf9..4ecd399f50f8 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient/interface.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient/interface.go
@@ -25,7 +25,7 @@ import (
context "context"
reflect "reflect"
- compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
azure "github.com/Azure/go-autorest/autorest/azure"
gomock "github.com/golang/mock/gomock"
retry "sigs.k8s.io/cloud-provider-azure/pkg/retry"
@@ -112,6 +112,36 @@ func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName interface{}) *g
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName)
}
+// ListVmssFlexVMsWithOnlyInstanceView mocks base method.
+func (m *MockInterface) ListVmssFlexVMsWithOnlyInstanceView(ctx context.Context, vmssFlexID string) ([]compute.VirtualMachine, *retry.Error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListVmssFlexVMsWithOnlyInstanceView", ctx, vmssFlexID)
+ ret0, _ := ret[0].([]compute.VirtualMachine)
+ ret1, _ := ret[1].(*retry.Error)
+ return ret0, ret1
+}
+
+// ListVmssFlexVMsWithOnlyInstanceView indicates an expected call of ListVmssFlexVMsWithOnlyInstanceView.
+func (mr *MockInterfaceMockRecorder) ListVmssFlexVMsWithOnlyInstanceView(ctx, vmssFlexID interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListVmssFlexVMsWithOnlyInstanceView", reflect.TypeOf((*MockInterface)(nil).ListVmssFlexVMsWithOnlyInstanceView), ctx, vmssFlexID)
+}
+
+// ListVmssFlexVMsWithoutInstanceView mocks base method.
+func (m *MockInterface) ListVmssFlexVMsWithoutInstanceView(ctx context.Context, vmssFlexID string) ([]compute.VirtualMachine, *retry.Error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListVmssFlexVMsWithoutInstanceView", ctx, vmssFlexID)
+ ret0, _ := ret[0].([]compute.VirtualMachine)
+ ret1, _ := ret[1].(*retry.Error)
+ return ret0, ret1
+}
+
+// ListVmssFlexVMsWithoutInstanceView indicates an expected call of ListVmssFlexVMsWithoutInstanceView.
+func (mr *MockInterfaceMockRecorder) ListVmssFlexVMsWithoutInstanceView(ctx, vmssFlexID interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListVmssFlexVMsWithoutInstanceView", reflect.TypeOf((*MockInterface)(nil).ListVmssFlexVMsWithoutInstanceView), ctx, vmssFlexID)
+}
+
// Update mocks base method.
func (m *MockInterface) Update(ctx context.Context, resourceGroupName, VMName string, parameters compute.VirtualMachineUpdate, source string) *retry.Error {
m.ctrl.T.Helper()
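Reviewer note: the regenerated mock wires into tests with the usual gomock pattern; a minimal sketch follows (the test name and expected values are illustrative; imports of context, testing, compute, to, gomock, and mockvmclient elided).

    func TestListVmssFlexVMsMock(t *testing.T) {
        ctrl := gomock.NewController(t)
        defer ctrl.Finish()

        mockVMClient := mockvmclient.NewMockInterface(ctrl)
        expected := []compute.VirtualMachine{{Name: to.StringPtr("vmssflex-vm-0")}}
        mockVMClient.EXPECT().
            ListVmssFlexVMsWithoutInstanceView(gomock.Any(), gomock.Any()).
            Return(expected, nil)

        vms, rerr := mockVMClient.ListVmssFlexVMsWithoutInstanceView(context.Background(), "vmssflex-id")
        if rerr != nil || len(vms) != 1 {
            t.Fatalf("unexpected result: %+v, %v", vms, rerr)
        }
    }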
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmsizeclient/azure_vmsizeclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmsizeclient/azure_vmsizeclient.go
index 8ec2c0ae5b21..b6da2d422867 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmsizeclient/azure_vmsizeclient.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmsizeclient/azure_vmsizeclient.go
@@ -23,7 +23,7 @@ import (
"strings"
"time"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmsizeclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmsizeclient/interface.go
index 215361e26594..8c595d0f3f82 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmsizeclient/interface.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmsizeclient/interface.go
@@ -19,13 +19,14 @@ package vmsizeclient
import (
"context"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
+
"sigs.k8s.io/cloud-provider-azure/pkg/retry"
)
const (
// APIVersion is the API version for compute.
- APIVersion = "2020-12-01"
+ APIVersion = "2022-03-01"
// AzureStackCloudAPIVersion is the API version for Azure Stack
AzureStackCloudAPIVersion = "2017-12-01"
// AzureStackCloudName is the cloud name of Azure Stack
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/azure_vmssclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/azure_vmssclient.go
index 4a3a5c946ba4..d2c194e4991e 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/azure_vmssclient.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/azure_vmssclient.go
@@ -22,7 +22,7 @@ import (
"strings"
"time"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/to"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/interface.go
index dcd33f9df91b..12f414e1c9d8 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/interface.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/interface.go
@@ -20,7 +20,7 @@ import (
"context"
"net/http"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/go-autorest/autorest/azure"
"sigs.k8s.io/cloud-provider-azure/pkg/retry"
@@ -28,7 +28,7 @@ import (
const (
// APIVersion is the API version for VMSS.
- APIVersion = "2020-12-01"
+ APIVersion = "2022-03-01"
// AzureStackCloudAPIVersion is the API version for Azure Stack
AzureStackCloudAPIVersion = "2019-07-01"
// AzureStackCloudName is the cloud name of Azure Stack
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient/interface.go
index 449983234987..bba12610a00b 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient/interface.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient/interface.go
@@ -26,7 +26,7 @@ import (
http "net/http"
reflect "reflect"
- compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
azure "github.com/Azure/go-autorest/autorest/azure"
gomock "github.com/golang/mock/gomock"
retry "sigs.k8s.io/cloud-provider-azure/pkg/retry"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/azure_vmssvmclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/azure_vmssvmclient.go
index 04f47ac68a52..4f44248cedd6 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/azure_vmssvmclient.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/azure_vmssvmclient.go
@@ -22,7 +22,7 @@ import (
"strings"
"time"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/to"
@@ -33,6 +33,7 @@ import (
azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/consts"
"sigs.k8s.io/cloud-provider-azure/pkg/metrics"
"sigs.k8s.io/cloud-provider-azure/pkg/retry"
)
@@ -230,34 +231,32 @@ func (c *Client) listVMSSVM(ctx context.Context, resourceGroupName string, virtu
}
// Update updates a VirtualMachineScaleSetVM.
-func (c *Client) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) *retry.Error {
+func (c *Client) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) (*compute.VirtualMachineScaleSetVM, *retry.Error) {
mc := metrics.NewMetricContext("vmssvm", "update", resourceGroupName, c.subscriptionID, source)
// Report errors if the client is rate limited.
if !c.rateLimiterWriter.TryAccept() {
mc.RateLimitedCount()
- return retry.GetRateLimitError(true, "VMSSVMUpdate")
+ return nil, retry.GetRateLimitError(true, "VMSSVMUpdate")
}
// Report errors if the client is throttled.
if c.RetryAfterWriter.After(time.Now()) {
mc.ThrottledCount()
rerr := retry.GetThrottlingError("VMSSVMUpdate", "client throttled", c.RetryAfterWriter)
- return rerr
+ return nil, rerr
}
- rerr := c.updateVMSSVM(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters)
+ result, rerr := c.updateVMSSVM(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters)
mc.Observe(rerr)
if rerr != nil {
if rerr.IsThrottled() {
// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
c.RetryAfterWriter = rerr.RetryAfter
}
-
- return rerr
}
- return nil
+ return result, rerr
}
// UpdateAsync updates a VirtualMachineScaleSetVM asynchronously
@@ -301,23 +300,37 @@ func (c *Client) UpdateAsync(ctx context.Context, resourceGroupName string, VMSc
}
// WaitForUpdateResult waits for the response of the update request
-func (c *Client) WaitForUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName, source string) *retry.Error {
+func (c *Client) WaitForUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName, source string) (*compute.VirtualMachineScaleSetVM, *retry.Error) {
mc := metrics.NewMetricContext("vmss", "wait_for_update_result", resourceGroupName, c.subscriptionID, source)
response, err := c.armClient.WaitForAsyncOperationResult(ctx, future, "VMSSWaitForUpdateResult")
mc.Observe(retry.NewErrorOrNil(false, err))
+ defer c.armClient.CloseResponse(ctx, response)
+
if err != nil {
if response != nil {
klog.V(5).Infof("Received error in WaitForAsyncOperationResult: '%s', response code %d", err.Error(), response.StatusCode)
} else {
klog.V(5).Infof("Received error in WaitForAsyncOperationResult: '%s', no response", err.Error())
}
- return retry.GetError(response, err)
+ return nil, retry.GetError(response, err)
}
- return nil
+
+ if response != nil && response.StatusCode != http.StatusNoContent {
+ result, rerr := c.updateResponder(response)
+ if rerr != nil {
+ klog.V(5).Infof("Received error in WaitForAsyncOperationResult updateResponder: '%s'", rerr.Error())
+ }
+
+ return result, rerr
+ }
+
+ result := &compute.VirtualMachineScaleSetVM{}
+ result.Response = autorest.Response{Response: response}
+ return result, nil
}
// updateVMSSVM updates a VirtualMachineScaleSetVM.
-func (c *Client) updateVMSSVM(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM) *retry.Error {
+func (c *Client) updateVMSSVM(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM) (*compute.VirtualMachineScaleSetVM, *retry.Error) {
resourceID := armclient.GetChildResourceID(
c.subscriptionID,
resourceGroupName,
@@ -331,18 +344,20 @@ func (c *Client) updateVMSSVM(ctx context.Context, resourceGroupName string, VMS
defer c.armClient.CloseResponse(ctx, response)
if rerr != nil {
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmssvm.put.request", resourceID, rerr.Error())
- return rerr
+ return nil, rerr
}
if response != nil && response.StatusCode != http.StatusNoContent {
- _, rerr = c.updateResponder(response)
+ result, rerr := c.updateResponder(response)
if rerr != nil {
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmssvm.put.respond", resourceID, rerr.Error())
- return rerr
}
+ return result, rerr
}
- return nil
+ result := &compute.VirtualMachineScaleSetVM{}
+ result.Response = autorest.Response{Response: response}
+ return result, nil
}
func (c *Client) updateResponder(resp *http.Response) (*compute.VirtualMachineScaleSetVM, *retry.Error) {
@@ -503,6 +518,22 @@ func (c *Client) updateVMSSVMs(ctx context.Context, resourceGroupName string, VM
defer c.armClient.CloseResponse(ctx, resp.Response)
if resp.Error != nil {
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmssvm.put.request", resourceID, resp.Error.Error())
+
+ errMsg := resp.Error.Error().Error()
+ if strings.Contains(errMsg, consts.VmssVMNotActiveErrorMessage) {
+ klog.V(2).Infof("VMSS VM %s is not active, skip updating it.", resourceID)
+ continue
+ }
+ if strings.Contains(errMsg, consts.ParentResourceNotFoundMessageCode) {
+ klog.V(2).Info("The parent resource of VMSS VM %s is not found, skip updating it.", resourceID)
+ continue
+ }
+ if strings.Contains(errMsg, consts.CannotUpdateVMBeingDeletedMessagePrefix) &&
+ strings.Contains(errMsg, consts.CannotUpdateVMBeingDeletedMessageSuffix) {
+ klog.V(2).Infof("The VM %s is being deleted, skip updating it.", resourceID)
+ continue
+ }
+
errors = append(errors, resp.Error)
continue
}
@@ -521,7 +552,12 @@ func (c *Client) updateVMSSVMs(ctx context.Context, resourceGroupName string, VM
rerr := &retry.Error{}
errs := make([]error, 0)
for _, err := range errors {
- if err.IsThrottled() && err.RetryAfter.After(err.RetryAfter) {
+ if !err.Retriable && strings.Contains(err.Error().Error(), consts.ConcurrentRequestConflictMessage) {
+ err.Retriable = true
+ err.RetryAfter = time.Now().Add(5 * time.Second)
+ }
+
+ if err.IsThrottled() && err.RetryAfter.After(rerr.RetryAfter) {
rerr.RetryAfter = err.RetryAfter
}
errs = append(errs, err.Error())
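Reviewer note: Update and WaitForUpdateResult now return the updated instance, so callers can refresh local state from the PUT response instead of issuing an extra GET. A hedged sketch of the new call pattern (all names are placeholders; imports elided):

    // refreshAfterUpdate is an illustrative sketch, not part of the patch.
    func refreshAfterUpdate(ctx context.Context, c vmssvmclient.Interface, vm compute.VirtualMachineScaleSetVM) error {
        updatedVM, rerr := c.Update(ctx, "rg", "vmss", "0", vm, "update_vmss_instance")
        if rerr != nil {
            if rerr.IsThrottled() {
                // Honor rerr.RetryAfter before sending the next request.
            }
            return rerr.Error()
        }
        if updatedVM != nil {
            // Write updatedVM back into the local VMSS VM cache here.
        }
        return nil
    }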
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/interface.go
index 001dba25b8a7..22ec029c1d65 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/interface.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/interface.go
@@ -19,7 +19,7 @@ package vmssvmclient
import (
"context"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/go-autorest/autorest/azure"
"sigs.k8s.io/cloud-provider-azure/pkg/retry"
@@ -27,7 +27,7 @@ import (
const (
// APIVersion is the API version for VMSS.
- APIVersion = "2020-12-01"
+ APIVersion = "2022-03-01"
// AzureStackCloudAPIVersion is the API version for Azure Stack
AzureStackCloudAPIVersion = "2019-07-01"
// AzureStackCloudName is the cloud name of Azure Stack
@@ -44,13 +44,13 @@ type Interface interface {
List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, expand string) ([]compute.VirtualMachineScaleSetVM, *retry.Error)
// Update updates a VirtualMachineScaleSetVM.
- Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) *retry.Error
+ Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) (*compute.VirtualMachineScaleSetVM, *retry.Error)
// UpdateAsync updates a VirtualMachineScaleSetVM asynchronously
UpdateAsync(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) (*azure.Future, *retry.Error)
// WaitForUpdateResult waits for the response of the update request
- WaitForUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName, source string) *retry.Error
+ WaitForUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName, source string) (*compute.VirtualMachineScaleSetVM, *retry.Error)
// UpdateVMs updates a list of VirtualMachineScaleSetVM from map[instanceID]compute.VirtualMachineScaleSetVM.
UpdateVMs(ctx context.Context, resourceGroupName string, VMScaleSetName string, instances map[string]compute.VirtualMachineScaleSetVM, source string, batchSize int) *retry.Error
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient/interface.go
index 918d22be733f..962c349f8960 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient/interface.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient/interface.go
@@ -25,7 +25,7 @@ import (
context "context"
reflect "reflect"
- compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
azure "github.com/Azure/go-autorest/autorest/azure"
gomock "github.com/golang/mock/gomock"
retry "sigs.k8s.io/cloud-provider-azure/pkg/retry"
@@ -85,11 +85,12 @@ func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName, virtualMachine
}
// Update mocks base method.
-func (m *MockInterface) Update(ctx context.Context, resourceGroupName, VMScaleSetName, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) *retry.Error {
+func (m *MockInterface) Update(ctx context.Context, resourceGroupName, VMScaleSetName, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) (*compute.VirtualMachineScaleSetVM, *retry.Error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Update", ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source)
- ret0, _ := ret[0].(*retry.Error)
- return ret0
+ ret0, _ := ret[0].(*compute.VirtualMachineScaleSetVM)
+ ret1, _ := ret[1].(*retry.Error)
+ return ret0, ret1
}
// Update indicates an expected call of Update.
@@ -128,11 +129,12 @@ func (mr *MockInterfaceMockRecorder) UpdateVMs(ctx, resourceGroupName, VMScaleSe
}
// WaitForUpdateResult mocks base method.
-func (m *MockInterface) WaitForUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName, source string) *retry.Error {
+func (m *MockInterface) WaitForUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName, source string) (*compute.VirtualMachineScaleSetVM, *retry.Error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WaitForUpdateResult", ctx, future, resourceGroupName, source)
- ret0, _ := ret[0].(*retry.Error)
- return ret0
+ ret0, _ := ret[0].(*compute.VirtualMachineScaleSetVM)
+ ret1, _ := ret[1].(*retry.Error)
+ return ret0, ret1
}
// WaitForUpdateResult indicates an expected call of WaitForUpdateResult.
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/cache/azure_cache.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/cache/azure_cache.go
index dd936db482c6..d165c1f5f771 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/cache/azure_cache.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/cache/azure_cache.go
@@ -22,6 +22,8 @@ import (
"time"
"k8s.io/client-go/tools/cache"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/util/deepcopy"
)
// AzureCacheReadType defines the read type for cache data
@@ -122,6 +124,17 @@ func (t *TimedCache) getInternal(key string) (*AzureCacheEntry, error) {
// Get returns the requested item by key.
func (t *TimedCache) Get(key string, crt AzureCacheReadType) (interface{}, error) {
+ return t.get(key, crt)
+}
+
+// GetWithDeepCopy returns the requested item by key with deep copy.
+func (t *TimedCache) GetWithDeepCopy(key string, crt AzureCacheReadType) (interface{}, error) {
+ data, err := t.get(key, crt)
+ copied := deepcopy.Copy(data)
+ return copied, err
+}
+
+func (t *TimedCache) get(key string, crt AzureCacheReadType) (interface{}, error) {
entry, err := t.getInternal(key)
if err != nil {
return nil, err
@@ -173,3 +186,12 @@ func (t *TimedCache) Set(key string, data interface{}) {
CreatedOn: time.Now().UTC(),
})
}
+
+// Update updates the data cache for the key.
+func (t *TimedCache) Update(key string, data interface{}) {
+ _ = t.Store.Update(&AzureCacheEntry{
+ Key: key,
+ Data: data,
+ CreatedOn: time.Now().UTC(),
+ })
+}
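Reviewer note: Get hands back the shared cached value, while GetWithDeepCopy returns a private copy that is safe to mutate. A hedged sketch of the difference, using the key and tag constants introduced later in this patch (imports of cache, compute, consts, and to elided):

    // mutateCachedVMSS is an illustrative sketch, not part of the patch.
    func mutateCachedVMSS(timedCache *cache.TimedCache) error {
        data, err := timedCache.GetWithDeepCopy(consts.VMSSKey, cache.CacheReadTypeDefault)
        if err != nil {
            return err
        }
        if vmss, ok := data.(*compute.VirtualMachineScaleSet); ok && vmss != nil {
            if vmss.Tags == nil {
                vmss.Tags = map[string]*string{}
            }
            // Mutates the private copy only; the shared cache entry is untouched.
            vmss.Tags[consts.VMSSTagForBatchOperation] = to.StringPtr("true")
        }
        return nil
    }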
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go
index 4f07c40acf17..04838b5d1d3c 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go
@@ -19,9 +19,8 @@ package consts
import (
"time"
- "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage"
-
- "k8s.io/component-base/featuregate"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
)
const (
@@ -29,6 +28,8 @@ const (
VMTypeVMSS = "vmss"
// VMTypeStandard is the vmas vm type
VMTypeStandard = "standard"
+ // VMTypeVmssFlex is the vmssflex vm type
+ VMTypeVmssFlex = "vmssflex"
// ExternalResourceGroupLabel is the label representing the node is in a different
// resource group from other cloud provider components
@@ -73,10 +74,6 @@ const (
DiskEncryptionSetIDFormat = "/subscriptions/{subs-id}/resourceGroups/{rg-name}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSet-name}"
- // IPv6DualStack is here to avoid having to import features pkg
- // and violate import rules
- IPv6DualStack featuregate.Feature = "IPv6DualStack"
-
// MachineIDTemplate is the template of the virtual machine
MachineIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s"
// AvailabilitySetIDTemplate is the template of the availabilitySet ID
@@ -131,6 +128,8 @@ const (
TagKeyValueDelimiter = "="
// VMSetNamesSharingPrimarySLBDelimiter is the delimiter of vmSet names sharing the primary SLB
VMSetNamesSharingPrimarySLBDelimiter = ","
+ // PremiumV2LRS is the PremiumV2_LRS storage account type for Azure Disk
+ PremiumV2LRS = compute.DiskStorageAccountTypes("PremiumV2_LRS")
)
// cache
@@ -141,8 +140,21 @@ const (
VMSSKey = "k8svmssKey"
// VMASKey is the key when querying vmss cache
VMASKey = "k8svmasKey"
+ // NonVmssUniformNodesKey is the key when querying nonVmssUniformNodes cache
+ NonVmssUniformNodesKey = "k8sNonVmssUniformNodesKey"
// AvailabilitySetNodesKey is the availability set nodes key
AvailabilitySetNodesKey = "k8sAvailabilitySetNodesKey"
+
+ // VmssFlexKey is the key when querying vmssFlexVM cache
+ VmssFlexKey = "k8sVmssFlexKey"
+
+ // GetNodeVmssFlexIDLockKey is the key for getting the lock for getNodeVmssFlexID function
+ GetNodeVmssFlexIDLockKey = "k8sGetNodeVmssFlexIDLockKey"
+ // VMManagementTypeLockKey is the key for getting the lock for getVMManagementType function
+ VMManagementTypeLockKey = "VMManagementType"
+
+ // NonVmssUniformNodesCacheTTLDefaultInSeconds is the TTL of the non vmss uniform node cache
+ NonVmssUniformNodesCacheTTLDefaultInSeconds = 900
// AvailabilitySetNodesCacheTTLDefaultInSeconds is the TTL of the availabilitySet node cache
AvailabilitySetNodesCacheTTLDefaultInSeconds = 900
// VMSSCacheTTLDefaultInSeconds is the TTL of the vmss cache
@@ -152,6 +164,11 @@ const (
// VMASCacheTTLDefaultInSeconds is the TTL of the vmas cache
VMASCacheTTLDefaultInSeconds = 600
+ // VmssFlexCacheTTLDefaultInSeconds is the TTL of the vmss flex cache
+ VmssFlexCacheTTLDefaultInSeconds = 600
+ // VmssFlexVMCacheTTLDefaultInSeconds is the TTL of the vmss flex vm cache
+ VmssFlexVMCacheTTLDefaultInSeconds = 600
+
// ZoneFetchingInterval defines the interval of performing zoneClient.GetZones
ZoneFetchingInterval = 30 * time.Minute
)
@@ -176,6 +193,16 @@ const (
BackoffJitterDefault = 1.0
)
+// LB variables for dual-stack
+var (
+ // Service.Spec.LoadBalancerIP has been deprecated and may be removed in a future release. Those two annotations are introduced as alternatives to set IPv4/IPv6 LoadBalancer IPs.
+ // Refer https://github.com/kubernetes/api/blob/3638040e4063e0f889c129220cd386497f328276/core/v1/types.go#L4459-L4468 for more details.
+ ServiceAnnotationLoadBalancerIPDualStack = map[bool]string{
+ false: "service.beta.kubernetes.io/azure-load-balancer-ipv4",
+ true: "service.beta.kubernetes.io/azure-load-balancer-ipv6",
+ }
+)
+
// load balancer
const (
// PreConfiguredBackendPoolLoadBalancerTypesNone means that the load balancers are not pre-configured
@@ -273,7 +300,7 @@ const (
// ServiceAnnotationLoadBalancerHealthProbeRequestPath determines the request path of the load balancer health probe.
// This is only useful for the HTTP and HTTPS, and would be ignored when using TCP. If not set,
- // `/healthz` would be configured by default.
+ // `/` would be configured by default.
ServiceAnnotationLoadBalancerHealthProbeRequestPath = "service.beta.kubernetes.io/azure-load-balancer-health-probe-request-path"
// ServiceAnnotationAzurePIPTags determines what tags should be applied to the public IP of the service. The cluster name
@@ -281,6 +308,10 @@ const (
// is `a=b,c=d,...`. After updated, the old user-assigned tags would not be replaced by the new ones.
ServiceAnnotationAzurePIPTags = "service.beta.kubernetes.io/azure-pip-tags"
+ // ServiceAnnotationDisableLoadBalancerFloatingIP is the annotation used on the service to disable floating IP in load balancer rule.
+ // If omitted, the default value is false
+ ServiceAnnotationDisableLoadBalancerFloatingIP = "service.beta.kubernetes.io/azure-disable-load-balancer-floating-ip"
+
// ServiceAnnotationAzurePIPTags sets the additional Public IPs (split by comma) besides the service's Public IP configured on LoadBalancer.
// These additional Public IPs would be consumed by kube-proxy to configure the iptables rules on each node. Note they would not be configured
// automatically on Azure LoadBalancer. Instead, they need to be configured manually (e.g. on Azure cross-region LoadBalancer by another operator).
@@ -344,6 +375,14 @@ const (
CannotDeletePublicIPErrorMessageCode = "PublicIPAddressCannotBeDeleted"
// ReferencedResourceNotProvisionedMessageCode means the referenced resource has not been provisioned
ReferencedResourceNotProvisionedMessageCode = "ReferencedResourceNotProvisioned"
+ // ParentResourceNotFoundMessageCode is the error code that the parent VMSS of the VM is not found.
+ ParentResourceNotFoundMessageCode = "ParentResourceNotFound"
+ // ConcurrentRequestConflictMessage is the error message that the request failed due to the conflict with another concurrent operation.
+ ConcurrentRequestConflictMessage = "The request failed due to conflict with a concurrent request."
+ // CannotUpdateVMBeingDeletedMessagePrefix is the prefix of the error message returned when updating a VM that is being deleted
+ CannotUpdateVMBeingDeletedMessagePrefix = "'Put on Virtual Machine Scale Set VM Instance' is not allowed on Virtual Machine Scale Set"
+ // CannotUpdateVMBeingDeletedMessageSuffix is the suffix of the error message returned when updating a VM that is being deleted
+ CannotUpdateVMBeingDeletedMessageSuffix = "since it is marked for deletion"
)
// node ipam controller
@@ -389,9 +428,27 @@ const RateLimited = "rate limited"
// CreatedByTag tag key for CSI drivers
const CreatedByTag = "k8s-azure-created-by"
+// port specific
+const (
+ // PortAnnotationPrefixPattern is the prefix pattern of the port-specific annotation keys, e.g. "service.beta.kubernetes.io/port_80_no_lb_rule".
+ PortAnnotationPrefixPattern = "service.beta.kubernetes.io/port_%d_%s"
+ // PortAnnotationNoLBRule determines whether to skip creating the load balancer rule for the port.
+ PortAnnotationNoLBRule PortParams = "no_lb_rule"
+ // PortAnnotationNoHealthProbeRule determines whether the port is only used for health probe. no lb probe rule will be created.
+ PortAnnotationNoHealthProbeRule PortParams = "no_probe_rule"
+)
+
+// PortParams is the type of the port-specific annotation parameter names.
+type PortParams string
+
// health probe
const (
- HealthProbeAnnotationPrefixPattern = "service.beta.kubernetes.io/port_%d_health-probe_"
+ // HealthProbeAnnotationPrefixPattern is the sub-pattern of health probe parameter names; BuildHealthProbeAnnotationKeyForPort composes it into PortAnnotationPrefixPattern.
+ HealthProbeAnnotationPrefixPattern = "health-probe_%s"
+
+ // HealthProbeParamsProtocol determines the protocol for the health probe params.
+ // It always takes priority over spec.appProtocol or any other specified protocol
+ HealthProbeParamsProtocol HealthProbeParams = "protocol"
+
+ // HealthProbeParamsPort determines the probe port for the health probe params.
+ // It always takes priority over the NodePort of the spec.ports in a Service
+ HealthProbeParamsPort HealthProbeParams = "port"
// HealthProbeParamsProbeInterval determines the probe interval of the load balancer health probe.
// The minimum probe interval is 5 seconds and the default value is 5. The total duration of all intervals cannot exceed 120 seconds.
@@ -457,3 +514,7 @@ const (
// Default number of IP configs for PLS
PLSDefaultNumOfIPConfig = 1
)
+
+const (
+ // VMSSTagForBatchOperation is the VMSS tag associated with batch operations.
+ VMSSTagForBatchOperation = "aks-managed-coordination"
+)
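Reviewer note: a hedged sketch of one way the new dual-stack annotation map could be consumed; svc is a placeholder *v1.Service, utilnet is k8s.io/utils/net, and this helper is illustrative, not part of the patch.

    // loadBalancerIPKey is an illustrative sketch, not part of the patch.
    func loadBalancerIPKey(svc *v1.Service) string {
        isIPv6 := utilnet.IsIPv6String(svc.Spec.ClusterIP)
        // false => "service.beta.kubernetes.io/azure-load-balancer-ipv4"
        // true  => "service.beta.kubernetes.io/azure-load-balancer-ipv6"
        return consts.ServiceAnnotationLoadBalancerIPDualStack[isIPv6]
    }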
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/helpers.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/helpers.go
index 44296f3d945c..b800e9c64458 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/helpers.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/helpers.go
@@ -26,7 +26,7 @@ import (
"k8s.io/utils/net"
)
-// IsK8sServiceHasHAModeEnabled return if HA Mode is enabled in kuberntes service annotations
+// IsK8sServiceHasHAModeEnabled return if HA Mode is enabled in kubernetes service annotations
func IsK8sServiceHasHAModeEnabled(service *v1.Service) bool {
return expectAttributeInSvcAnnotationBeEqualTo(service.Annotations, ServiceAnnotationLoadBalancerEnableHighAvailabilityPorts, TrueAnnotationValue)
}
@@ -40,11 +40,26 @@ func IsK8sServiceInternalIPv6(service *v1.Service) bool {
return IsK8sServiceUsingInternalLoadBalancer(service) && net.IsIPv6String(service.Spec.ClusterIP)
}
+// IsK8sServiceDisableLoadBalancerFloatingIP return if floating IP in load balancer is disabled in kubernetes service annotations
+func IsK8sServiceDisableLoadBalancerFloatingIP(service *v1.Service) bool {
+ return expectAttributeInSvcAnnotationBeEqualTo(service.Annotations, ServiceAnnotationDisableLoadBalancerFloatingIP, TrueAnnotationValue)
+}
+
// GetHealthProbeConfigOfPortFromK8sSvcAnnotation get health probe configuration for port
func GetHealthProbeConfigOfPortFromK8sSvcAnnotation(annotations map[string]string, port int32, key HealthProbeParams, validators ...BusinessValidator) (*string, error) {
return GetAttributeValueInSvcAnnotation(annotations, BuildHealthProbeAnnotationKeyForPort(port, key), validators...)
}
+// IsHealthProbeRuleOnK8sServicePortDisabled return if port is for health probe only
+func IsHealthProbeRuleOnK8sServicePortDisabled(annotations map[string]string, port int32) (bool, error) {
+ return expectAttributeInSvcAnnotationBeEqualTo(annotations, BuildAnnotationKeyForPort(port, PortAnnotationNoHealthProbeRule), TrueAnnotationValue), nil
+}
+
+// IsLBRuleOnK8sServicePortDisabled return if the load balancer rule for the port is disabled
+func IsLBRuleOnK8sServicePortDisabled(annotations map[string]string, port int32) (bool, error) {
+ return expectAttributeInSvcAnnotationBeEqualTo(annotations, BuildAnnotationKeyForPort(port, PortAnnotationNoLBRule), TrueAnnotationValue), nil
+}
+
// Getint32ValueFromK8sSvcAnnotation get health probe configuration for port
func Getint32ValueFromK8sSvcAnnotation(annotations map[string]string, key string, validators ...Int32BusinessValidator) (*int32, error) {
val, err := GetAttributeValueInSvcAnnotation(annotations, key)
@@ -54,9 +69,14 @@ func Getint32ValueFromK8sSvcAnnotation(annotations map[string]string, key string
return nil, err
}
+// BuildAnnotationKeyForPort builds the annotation key for the given port and parameter name
+func BuildAnnotationKeyForPort(port int32, key PortParams) string {
+ return fmt.Sprintf(PortAnnotationPrefixPattern, port, string(key))
+}
+
// BuildHealthProbeAnnotationKeyForPort get health probe configuration key for port
func BuildHealthProbeAnnotationKeyForPort(port int32, key HealthProbeParams) string {
- return fmt.Sprintf(HealthProbeAnnotationPrefixPattern, port) + string(key)
+ return BuildAnnotationKeyForPort(port, PortParams(fmt.Sprintf(HealthProbeAnnotationPrefixPattern, key)))
}
// GetInt32HealthProbeConfigOfPortFromK8sSvcAnnotation get health probe configuration for port
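Reviewer note: the reworked builders compose port annotation keys from PortAnnotationPrefixPattern; a self-contained sketch of the resulting keys, derived directly from the patterns defined in consts.go above:

    package main

    import (
        "fmt"

        "sigs.k8s.io/cloud-provider-azure/pkg/consts"
    )

    func main() {
        fmt.Println(consts.BuildAnnotationKeyForPort(80, consts.PortAnnotationNoLBRule))
        // service.beta.kubernetes.io/port_80_no_lb_rule
        fmt.Println(consts.BuildHealthProbeAnnotationKeyForPort(80, consts.HealthProbeParamsProtocol))
        // service.beta.kubernetes.io/port_80_health-probe_protocol
    }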
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/metrics/azure_metrics.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/metrics/azure_metrics.go
index be2a7ae0cd94..70acb3066948 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/metrics/azure_metrics.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/metrics/azure_metrics.go
@@ -99,6 +99,9 @@ func (mc *MetricContext) ObserveOperationWithResult(isOperationSucceeded bool, l
resultCode := "succeeded"
if !isOperationSucceeded {
resultCode = "failed"
+ if len(mc.attributes) > 0 {
+ resultCode += mc.attributes[0][strings.Index(mc.attributes[0], "_"):]
+ }
mc.CountFailedOperation()
}
mc.logLatency(3, latency, append(labelAndValues, "result_code", resultCode)...)
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/nodemanager/nodemanager.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/nodemanager/nodemanager.go
index 9f1322709883..898c3dd7affd 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/nodemanager/nodemanager.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/nodemanager/nodemanager.go
@@ -64,40 +64,15 @@ type NodeProvider interface {
// labelReconcileInfo lists Node labels to reconcile, and how to reconcile them.
// primaryKey and secondaryKey are keys of labels to reconcile.
-// - If both keys exist, but their values don't match. Use the value from the
-// primaryKey as the source of truth to reconcile.
-// - If ensureSecondaryExists is true, and the secondaryKey does not
-// exist, secondaryKey will be added with the value of the primaryKey.
+// - If both keys exist, but their values don't match. Use the value from the
+// primaryKey as the source of truth to reconcile.
+// - If ensureSecondaryExists is true, and the secondaryKey does not
+// exist, secondaryKey will be added with the value of the primaryKey.
var labelReconcileInfo = []struct {
primaryKey string
secondaryKey string
ensureSecondaryExists bool
-}{
- {
- // Reconcile the beta and the GA zone label using the beta label as
- // the source of truth
- // TODO: switch the primary key to GA labels in v1.21
- primaryKey: v1.LabelZoneFailureDomain,
- secondaryKey: v1.LabelZoneFailureDomainStable,
- ensureSecondaryExists: true,
- },
- {
- // Reconcile the beta and the stable region label using the beta label as
- // the source of truth
- // TODO: switch the primary key to GA labels in v1.21
- primaryKey: v1.LabelZoneRegion,
- secondaryKey: v1.LabelZoneRegionStable,
- ensureSecondaryExists: true,
- },
- {
- // Reconcile the beta and the stable instance-type label using the beta label as
- // the source of truth
- // TODO: switch the primary key to GA labels in v1.21
- primaryKey: v1.LabelInstanceType,
- secondaryKey: v1.LabelInstanceTypeStable,
- ensureSecondaryExists: true,
- },
-}
+}{}
// UpdateNodeSpecBackoff is the back configure for node update.
var UpdateNodeSpecBackoff = wait.Backoff{
@@ -155,7 +130,7 @@ func NewCloudNodeController(
// Use shared informer to listen to add/update of nodes. Note that any nodes
// that exist before node controller starts will show up in the update method
- cnc.nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+ _, _ = cnc.nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { cnc.AddCloudNode(context.TODO(), obj) },
UpdateFunc: func(oldObj, newObj interface{}) { cnc.UpdateCloudNode(context.TODO(), oldObj, newObj) },
})
@@ -262,7 +237,7 @@ func (cnc *CloudNodeController) updateNodeAddress(ctx context.Context, node *v1.
nodeAddresses, err := cnc.getNodeAddressesByName(ctx, node)
if err != nil {
- return fmt.Errorf("Error getting node addresses for node %q: %v", node.Name, err)
+ return fmt.Errorf("getting node addresses for node %q: %w", node.Name, err)
}
if len(nodeAddresses) == 0 {
@@ -302,7 +277,7 @@ func (cnc *CloudNodeController) updateNodeAddress(ctx context.Context, node *v1.
newNode.Status.Addresses = nodeAddresses
_, _, err = PatchNodeStatus(cnc.kubeClient.CoreV1(), types.NodeName(node.Name), node, newNode)
if err != nil {
- return fmt.Errorf("Error patching node with cloud ip addresses = [%v]", err)
+ return fmt.Errorf("patching node with cloud ip addresses: %w", err)
}
return nil
@@ -464,44 +439,36 @@ func (cnc *CloudNodeController) getNodeModifiersFromCloudProvider(ctx context.Co
if instanceType, err := cnc.getInstanceTypeByName(ctx, node); err != nil {
return nil, err
} else if instanceType != "" {
- klog.V(2).Infof("Adding node label from cloud provider: %s=%s", v1.LabelInstanceType, instanceType)
klog.V(2).Infof("Adding node label from cloud provider: %s=%s", v1.LabelInstanceTypeStable, instanceType)
nodeModifiers = append(nodeModifiers, func(n *v1.Node) {
if n.Labels == nil {
n.Labels = map[string]string{}
}
- n.Labels[v1.LabelInstanceType] = instanceType
n.Labels[v1.LabelInstanceTypeStable] = instanceType
})
}
-
zone, err := cnc.getZoneByName(ctx, node)
if err != nil {
return nil, fmt.Errorf("failed to get zone from cloud provider: %w", err)
}
if zone.FailureDomain != "" {
- klog.V(2).Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneFailureDomain, zone.FailureDomain)
klog.V(2).Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneFailureDomainStable, zone.FailureDomain)
nodeModifiers = append(nodeModifiers, func(n *v1.Node) {
if n.Labels == nil {
n.Labels = map[string]string{}
}
- n.Labels[v1.LabelZoneFailureDomain] = zone.FailureDomain
n.Labels[v1.LabelZoneFailureDomainStable] = zone.FailureDomain
})
}
if zone.Region != "" {
- klog.V(2).Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneRegion, zone.Region)
klog.V(2).Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneRegionStable, zone.Region)
nodeModifiers = append(nodeModifiers, func(n *v1.Node) {
if n.Labels == nil {
n.Labels = map[string]string{}
}
- n.Labels[v1.LabelZoneRegion] = zone.Region
n.Labels[v1.LabelZoneRegionStable] = zone.Region
})
}
-
platformSubFaultDomain, err := cnc.getPlatformSubFaultDomain()
if err != nil {
return nil, fmt.Errorf("failed to get platformSubFaultDomain: %w", err)
@@ -656,7 +623,7 @@ func (cnc *CloudNodeController) updateNetworkingCondition(node *v1.Node, network
Type: v1.NodeNetworkUnavailable,
Status: v1.ConditionFalse,
Reason: "NodeInitialization",
- Message: "Should wait for cloud routes",
+ Message: "Don't need to wait for cloud routes",
LastTransitionTime: currentTime,
})
} else {
@@ -664,7 +631,7 @@ func (cnc *CloudNodeController) updateNetworkingCondition(node *v1.Node, network
Type: v1.NodeNetworkUnavailable,
Status: v1.ConditionTrue,
Reason: "NodeInitialization",
- Message: "Don't need to wait for cloud routes",
+ Message: "Waiting for cloud routes",
LastTransitionTime: currentTime,
})
}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go
index 6d6643b19a00..caacea3ef63f 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go
@@ -17,6 +17,7 @@ limitations under the License.
package provider
import (
+ "context"
"errors"
"fmt"
"io"
@@ -27,6 +28,8 @@ import (
"sync"
"time"
+ ratelimitconfig "sigs.k8s.io/cloud-provider-azure/pkg/provider/config"
+
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
@@ -34,19 +37,21 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
- utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
+ corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/klog/v2"
- "sigs.k8s.io/cloud-provider-azure/pkg/auth"
azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/blobclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/containerserviceclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/deploymentclient"
"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient"
"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient"
"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/interfaceclient"
@@ -99,8 +104,8 @@ var (
// See https://kubernetes.io/docs/reference/using-api/deprecation-policy/#deprecating-a-flag-or-cli
// for more details.
type Config struct {
- auth.AzureAuthConfig
- CloudProviderRateLimitConfig
+ ratelimitconfig.AzureAuthConfig
+ ratelimitconfig.CloudProviderRateLimitConfig
// The cloud configure type for Azure cloud provider. Supported values are file, secret and merge.
CloudConfigType cloudConfigType `json:"cloudConfigType,omitempty" yaml:"cloudConfigType,omitempty"`
@@ -141,8 +146,8 @@ type Config struct {
// The name of the scale set that should be used as the load balancer backend.
// If this is set, the Azure cloudprovider will only add nodes from that scale set to the load
// balancer backend pool. If this is not set, and multiple agent pools (scale sets) are used, then
- // the cloudprovider will try to add all nodes to a single backend pool which is forbidden.
- // In other words, if you use multiple agent pools (scale sets), you MUST set this field.
+ // the cloudprovider will try to add all nodes to a single backend pool which is forbidden in the basic sku.
+ // In other words, if you use multiple agent pools (scale sets), and loadBalancerSku is set to basic, you MUST set this field.
PrimaryScaleSetName string `json:"primaryScaleSetName,omitempty" yaml:"primaryScaleSetName,omitempty"`
// Tags determines what tags shall be applied to the shared resources managed by controller manager, which
// includes load balancer, security group and route table. The supported format is `a=b,c=d,...`. After updated
@@ -175,6 +180,8 @@ type Config struct {
// DisableAvailabilitySetNodes disables VMAS nodes support when "VMType" is set to "vmss".
DisableAvailabilitySetNodes bool `json:"disableAvailabilitySetNodes,omitempty" yaml:"disableAvailabilitySetNodes,omitempty"`
+ // EnableVmssFlexNodes enables vmss flex nodes support when "VMType" is set to "vmss".
+ EnableVmssFlexNodes bool `json:"enableVmssFlexNodes,omitempty" yaml:"enableVmssFlexNodes,omitempty"`
// DisableAzureStackCloud disables AzureStackCloud support. It should be used
// when setting AzureAuthConfig.Cloud with "AZURESTACKCLOUD" to customize ARM endpoints
// while the cluster is not running on AzureStack.
@@ -212,6 +219,9 @@ type Config struct {
CloudProviderBackoffRetries int `json:"cloudProviderBackoffRetries,omitempty" yaml:"cloudProviderBackoffRetries,omitempty"`
// Backoff duration
CloudProviderBackoffDuration int `json:"cloudProviderBackoffDuration,omitempty" yaml:"cloudProviderBackoffDuration,omitempty"`
+ // NonVmssUniformNodesCacheTTLInSeconds sets the cache TTL for the nonVmssUniformNodes cache
+ // if not set, will use default value
+ NonVmssUniformNodesCacheTTLInSeconds int `json:"nonVmssUniformNodesCacheTTLInSeconds,omitempty" yaml:"nonVmssUniformNodesCacheTTLInSeconds,omitempty"`
// AvailabilitySetNodesCacheTTLInSeconds sets the Cache TTL for availabilitySetNodesCache
// if not set, will use default value
AvailabilitySetNodesCacheTTLInSeconds int `json:"availabilitySetNodesCacheTTLInSeconds,omitempty" yaml:"availabilitySetNodesCacheTTLInSeconds,omitempty"`
@@ -219,6 +229,12 @@ type Config struct {
VmssCacheTTLInSeconds int `json:"vmssCacheTTLInSeconds,omitempty" yaml:"vmssCacheTTLInSeconds,omitempty"`
// VmssVirtualMachinesCacheTTLInSeconds sets the cache TTL for vmssVirtualMachines
VmssVirtualMachinesCacheTTLInSeconds int `json:"vmssVirtualMachinesCacheTTLInSeconds,omitempty" yaml:"vmssVirtualMachinesCacheTTLInSeconds,omitempty"`
+
+ // VmssFlexCacheTTLInSeconds sets the cache TTL for VMSS Flex
+ VmssFlexCacheTTLInSeconds int `json:"vmssFlexCacheTTLInSeconds,omitempty" yaml:"vmssFlexCacheTTLInSeconds,omitempty"`
+ // VmssFlexVMCacheTTLInSeconds sets the cache TTL for vmss flex vms
+ VmssFlexVMCacheTTLInSeconds int `json:"vmssFlexVMCacheTTLInSeconds,omitempty" yaml:"vmssFlexVMCacheTTLInSeconds,omitempty"`
+
// VmCacheTTLInSeconds sets the cache TTL for vm
VMCacheTTLInSeconds int `json:"vmCacheTTLInSeconds,omitempty" yaml:"vmCacheTTLInSeconds,omitempty"`
// LoadBalancerCacheTTLInSeconds sets the cache TTL for load balancer
@@ -289,6 +305,7 @@ type Cloud struct {
DisksClient diskclient.Interface
SnapshotsClient snapshotclient.Interface
FileClient fileclient.Interface
+ BlobClient blobclient.Interface
VirtualMachineScaleSetsClient vmssclient.Interface
VirtualMachineScaleSetVMsClient vmssvmclient.Interface
VirtualMachineSizesClient vmsizeclient.Interface
@@ -299,6 +316,8 @@ type Cloud struct {
privatednszonegroupclient privatednszonegroupclient.Interface
virtualNetworkLinksClient virtualnetworklinksclient.Interface
PrivateLinkServiceClient privatelinkserviceclient.Interface
+ containerServiceClient containerserviceclient.Interface
+ deploymentClient deploymentclient.Interface
ResourceRequestBackoff wait.Backoff
Metadata *InstanceMetadataService
@@ -348,38 +367,27 @@ type Cloud struct {
// use LB frontEndIpConfiguration ID as the key and search for PLS attached to the frontEnd
plsCache *azcache.TimedCache
+ // Add service lister to always get latest service
+ serviceLister corelisters.ServiceLister
+ // node-sync-loop routine and service-reconcile routine should not update LoadBalancer at the same time
+ serviceReconcileLock sync.Mutex
+
*ManagedDiskController
*controllerCommon
}
-func init() {
- // In go-autorest SDK https://github.com/Azure/go-autorest/blob/master/autorest/sender.go#L258-L287,
- // if ARM returns http.StatusTooManyRequests, the sender doesn't increase the retry attempt count,
- // hence the Azure clients will keep retrying forever until it get a status code other than 429.
- // So we explicitly removes http.StatusTooManyRequests from autorest.StatusCodesForRetry.
- // Refer https://github.com/Azure/go-autorest/issues/398.
- // TODO(feiskyer): Use autorest.SendDecorator to customize the retry policy when new Azure SDK is available.
- statusCodesForRetry := make([]int, 0)
- for _, code := range autorest.StatusCodesForRetry {
- if code != http.StatusTooManyRequests {
- statusCodesForRetry = append(statusCodesForRetry, code)
- }
- }
- autorest.StatusCodesForRetry = statusCodesForRetry
-}
-
// NewCloud returns a Cloud with initialized clients
-func NewCloud(configReader io.Reader, callFromCCM bool) (cloudprovider.Interface, error) {
- az, err := NewCloudWithoutFeatureGates(configReader, callFromCCM)
+func NewCloud(ctx context.Context, configReader io.Reader, callFromCCM bool) (cloudprovider.Interface, error) {
+ az, err := NewCloudWithoutFeatureGates(ctx, configReader, callFromCCM)
if err != nil {
return nil, err
}
- az.ipv6DualStackEnabled = utilfeature.DefaultFeatureGate.Enabled(consts.IPv6DualStack)
+ az.ipv6DualStackEnabled = true
return az, nil
}
-func NewCloudFromConfigFile(configFilePath string, calFromCCM bool) (cloudprovider.Interface, error) {
+func NewCloudFromConfigFile(ctx context.Context, configFilePath string, calFromCCM bool) (cloudprovider.Interface, error) {
var (
cloud cloudprovider.Interface
err error
@@ -394,15 +402,15 @@ func NewCloudFromConfigFile(configFilePath string, calFromCCM bool) (cloudprovid
}
defer config.Close()
- cloud, err = NewCloud(config, calFromCCM)
+ cloud, err = NewCloud(ctx, config, callFromCCM)
} else {
// Pass explicit nil so plugins can actually check for nil. See
// "Why is my nil error value not equal to nil?" in golang.org/doc/faq.
- cloud, err = NewCloud(nil, false)
+ cloud, err = NewCloud(ctx, nil, false)
}
if err != nil {
- return nil, fmt.Errorf("could not init cloud provider azure: %v", err)
+ return nil, fmt.Errorf("could not init cloud provider azure: %w", err)
}
if cloud == nil {
return nil, fmt.Errorf("nil cloud")
@@ -429,7 +437,7 @@ func (az *Cloud) configSecretMetadata(secretName, secretNamespace, cloudConfigKe
}
}
-func NewCloudFromSecret(clientBuilder cloudprovider.ControllerClientBuilder, secretName, secretNamespace, cloudConfigKey string) (cloudprovider.Interface, error) {
+func NewCloudFromSecret(ctx context.Context, clientBuilder cloudprovider.ControllerClientBuilder, secretName, secretNamespace, cloudConfigKey string) (cloudprovider.Interface, error) {
az := &Cloud{
nodeNames: sets.NewString(),
nodeZones: map[string]sets.String{},
@@ -444,19 +452,19 @@ func NewCloudFromSecret(clientBuilder cloudprovider.ControllerClientBuilder, sec
az.Initialize(clientBuilder, wait.NeverStop)
- err := az.InitializeCloudFromSecret()
+ err := az.InitializeCloudFromSecret(ctx)
if err != nil {
- return nil, fmt.Errorf("NewCloudFromSecret: failed to initialize cloud from secret %s/%s: %v", az.SecretNamespace, az.SecretName, err)
+ return nil, fmt.Errorf("NewCloudFromSecret: failed to initialize cloud from secret %s/%s: %w", az.SecretNamespace, az.SecretName, err)
}
- az.ipv6DualStackEnabled = utilfeature.DefaultFeatureGate.Enabled(consts.IPv6DualStack)
+ az.ipv6DualStackEnabled = true
return az, nil
}
// NewCloudWithoutFeatureGates returns a Cloud without trying to wire the feature gates. This is used by the unit tests
// that don't load the actual features being used in the cluster.
-func NewCloudWithoutFeatureGates(configReader io.Reader, callFromCCM bool) (*Cloud, error) {
+func NewCloudWithoutFeatureGates(ctx context.Context, configReader io.Reader, callFromCCM bool) (*Cloud, error) {
config, err := ParseConfig(configReader)
if err != nil {
return nil, err
@@ -472,7 +480,7 @@ func NewCloudWithoutFeatureGates(configReader io.Reader, callFromCCM bool) (*Clo
nodePrivateIPs: map[string]sets.String{},
}
- err = az.InitializeCloudFromConfig(config, false, callFromCCM)
+ err = az.InitializeCloudFromConfig(ctx, config, false, callFromCCM)
if err != nil {
return nil, err
}
@@ -481,7 +489,7 @@ func NewCloudWithoutFeatureGates(configReader io.Reader, callFromCCM bool) (*Clo
}
// InitializeCloudFromConfig initializes the Cloud from config.
-func (az *Cloud) InitializeCloudFromConfig(config *Config, fromSecret, callFromCCM bool) error {
+func (az *Cloud) InitializeCloudFromConfig(ctx context.Context, config *Config, fromSecret, callFromCCM bool) error {
if config == nil {
// should not reach here
return fmt.Errorf("InitializeCloudFromConfig: cannot initialize from nil config")
@@ -539,13 +547,13 @@ func (az *Cloud) InitializeCloudFromConfig(config *Config, fromSecret, callFromC
}
}
- env, err := auth.ParseAzureEnvironment(config.Cloud, config.ResourceManagerEndpoint, config.IdentitySystem)
+ env, err := ratelimitconfig.ParseAzureEnvironment(config.Cloud, config.ResourceManagerEndpoint, config.IdentitySystem)
if err != nil {
return err
}
- servicePrincipalToken, err := auth.GetServicePrincipalToken(&config.AzureAuthConfig, env, env.ServiceManagementEndpoint)
- if errors.Is(err, auth.ErrorNoAuth) {
+ servicePrincipalToken, err := ratelimitconfig.GetServicePrincipalToken(&config.AzureAuthConfig, env, env.ServiceManagementEndpoint)
+ if errors.Is(err, ratelimitconfig.ErrorNoAuth) {
// Only controller-manager would lazy-initialize from secret, and credentials are required for such case.
if fromSecret {
err := fmt.Errorf("no credentials provided for Azure cloud provider")
@@ -566,7 +574,7 @@ func (az *Cloud) InitializeCloudFromConfig(config *Config, fromSecret, callFromC
}
// Initialize rate limiting config options.
- InitializeCloudProviderRateLimitConfig(&config.CloudProviderRateLimitConfig)
+ ratelimitconfig.InitializeCloudProviderRateLimitConfig(&config.CloudProviderRateLimitConfig)
resourceRequestBackoff := az.setCloudProviderBackoffDefaults(config)
@@ -600,7 +608,12 @@ func (az *Cloud) InitializeCloudFromConfig(config *Config, fromSecret, callFromC
}
if strings.EqualFold(consts.VMTypeVMSS, az.Config.VMType) {
- az.VMSet, err = newScaleSet(az)
+ az.VMSet, err = newScaleSet(ctx, az)
+ if err != nil {
+ return err
+ }
+ } else if strings.EqualFold(consts.VMTypeVmssFlex, az.Config.VMType) {
+ az.VMSet, err = newFlexScaleSet(ctx, az)
if err != nil {
return err
}
@@ -718,12 +731,12 @@ func (az *Cloud) configureMultiTenantClients(servicePrincipalToken *adal.Service
var err error
var multiTenantServicePrincipalToken *adal.MultiTenantServicePrincipalToken
var networkResourceServicePrincipalToken *adal.ServicePrincipalToken
- if az.Config.UsesNetworkResourceInDifferentTenantOrSubscription() {
- multiTenantServicePrincipalToken, err = auth.GetMultiTenantServicePrincipalToken(&az.Config.AzureAuthConfig, &az.Environment)
+ if az.Config.UsesNetworkResourceInDifferentTenant() {
+ multiTenantServicePrincipalToken, err = ratelimitconfig.GetMultiTenantServicePrincipalToken(&az.Config.AzureAuthConfig, &az.Environment)
if err != nil {
return err
}
- networkResourceServicePrincipalToken, err = auth.GetNetworkResourceServicePrincipalToken(&az.Config.AzureAuthConfig, &az.Environment)
+ networkResourceServicePrincipalToken, err = ratelimitconfig.GetNetworkResourceServicePrincipalToken(&az.Config.AzureAuthConfig, &az.Environment)
if err != nil {
return err
}
@@ -797,8 +810,16 @@ func (az *Cloud) configAzureClients(
loadBalancerClientConfig := azClientConfig.WithRateLimiter(az.Config.LoadBalancerRateLimit)
securityGroupClientConfig := azClientConfig.WithRateLimiter(az.Config.SecurityGroupRateLimit)
publicIPClientConfig := azClientConfig.WithRateLimiter(az.Config.PublicIPAddressRateLimit)
+ containerServiceConfig := azClientConfig.WithRateLimiter(az.Config.ContainerServiceRateLimit)
+ deploymentConfig := azClientConfig.WithRateLimiter(az.Config.DeploymentRateLimit)
+ privateDNSConfig := azClientConfig.WithRateLimiter(az.Config.PrivateDNSRateLimit)
+ privateDNSZoneGroupConfig := azClientConfig.WithRateLimiter(az.Config.PrivateDNSZoneGroupRateLimit)
+ privateEndpointConfig := azClientConfig.WithRateLimiter(az.Config.PrivateEndpointRateLimit)
+ privateLinkServiceConfig := azClientConfig.WithRateLimiter(az.Config.PrivateLinkServiceRateLimit)
+ virtualNetworkConfig := azClientConfig.WithRateLimiter(az.Config.VirtualNetworkRateLimit)
// TODO(ZeroMagic): add azurefileRateLimit
fileClientConfig := azClientConfig.WithRateLimiter(nil)
+ blobClientConfig := azClientConfig.WithRateLimiter(nil)
vmasClientConfig := azClientConfig.WithRateLimiter(az.Config.AvailabilitySetRateLimit)
zoneClientConfig := azClientConfig.WithRateLimiter(nil)
@@ -820,7 +841,9 @@ func (az *Cloud) configAzureClients(
loadBalancerClientConfig.Authorizer = networkResourceServicePrincipalTokenAuthorizer
securityGroupClientConfig.Authorizer = networkResourceServicePrincipalTokenAuthorizer
publicIPClientConfig.Authorizer = networkResourceServicePrincipalTokenAuthorizer
+ }
+ if az.UsesNetworkResourceInDifferentSubscription() {
routeClientConfig.SubscriptionID = az.Config.NetworkResourceSubscriptionID
subnetClientConfig.SubscriptionID = az.Config.NetworkResourceSubscriptionID
routeTableClientConfig.SubscriptionID = az.Config.NetworkResourceSubscriptionID
@@ -845,12 +868,15 @@ func (az *Cloud) configAzureClients(
az.SecurityGroupsClient = securitygroupclient.New(securityGroupClientConfig)
az.PublicIPAddressesClient = publicipclient.New(publicIPClientConfig)
az.FileClient = fileclient.New(fileClientConfig)
+ az.BlobClient = blobclient.New(blobClientConfig)
az.AvailabilitySetsClient = vmasclient.New(vmasClientConfig)
- az.privateendpointclient = privateendpointclient.New(azClientConfig)
- az.privatednsclient = privatednsclient.New(azClientConfig)
- az.privatednszonegroupclient = privatednszonegroupclient.New(azClientConfig)
- az.virtualNetworkLinksClient = virtualnetworklinksclient.New(azClientConfig)
- az.PrivateLinkServiceClient = privatelinkserviceclient.New(azClientConfig)
+ az.privateendpointclient = privateendpointclient.New(privateEndpointConfig)
+ az.privatednsclient = privatednsclient.New(privateDNSConfig)
+ az.privatednszonegroupclient = privatednszonegroupclient.New(privateDNSZoneGroupConfig)
+ az.virtualNetworkLinksClient = virtualnetworklinksclient.New(virtualNetworkConfig)
+ az.PrivateLinkServiceClient = privatelinkserviceclient.New(privateLinkServiceConfig)
+ az.containerServiceClient = containerserviceclient.New(containerServiceConfig)
+ az.deploymentClient = deploymentclient.New(deploymentConfig)
if az.ZoneClient == nil {
az.ZoneClient = zoneclient.New(zoneClientConfig)
@@ -972,8 +998,8 @@ func initDiskControllers(az *Cloud) error {
// Common controller contains the function
// needed by both blob disk and managed disk controllers
- qps := float32(defaultAtachDetachDiskQPS)
- bucket := defaultAtachDetachDiskBucket
+ qps := float32(ratelimitconfig.DefaultAtachDetachDiskQPS)
+ bucket := ratelimitconfig.DefaultAtachDetachDiskBucket
if az.Config.AttachDetachDiskRateLimit != nil {
qps = az.Config.AttachDetachDiskRateLimit.CloudProviderRateLimitQPSWrite
bucket = az.Config.AttachDetachDiskRateLimit.CloudProviderRateLimitBucketWrite
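The qps/bucket pair above is consumed by a client-side token bucket. A minimal sketch of how such a limiter behaves, assuming the provider wires these values into client-go's flowcontrol package for disk operations (the actual wiring is not shown in this hunk):

package main

import (
	"fmt"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// Illustrative values standing in for the configured QPS/bucket pair.
	qps := float32(6.0)
	bucket := 10

	// TryAccept returns immediately: true if a token is available,
	// false if the caller should fall back to batching the operation.
	limiter := flowcontrol.NewTokenBucketRateLimiter(qps, bucket)
	for i := 0; i < 3; i++ {
		fmt.Printf("disk op %d admitted: %v\n", i, limiter.TryAccept())
	}
}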
@@ -1007,7 +1033,7 @@ func initDiskControllers(az *Cloud) error {
func (az *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) {
klog.Infof("Setting up informers for Azure cloud provider")
nodeInformer := informerFactory.Core().V1().Nodes().Informer()
- nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
+ _, _ = nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
node := obj.(*v1.Node)
az.updateNodeCaches(nil, node)
@@ -1034,9 +1060,14 @@ func (az *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) {
}
}
az.updateNodeCaches(node, nil)
+
+ klog.V(4).Infof("Removing node %s from VMSet cache.", node.Name)
+ _ = az.VMSet.DeleteCacheForNode(node.Name)
},
})
az.nodeInformerSynced = nodeInformer.HasSynced
+
+ az.serviceLister = informerFactory.Core().V1().Services().Lister()
}
// updateNodeCaches updates local cache for node's zones and external resource groups.
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_backoff.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_backoff.go
index 0c47e54642a8..fadf4353dc8f 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_backoff.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_backoff.go
@@ -24,7 +24,7 @@ import (
"regexp"
"strings"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-08-01/network"
"github.com/Azure/go-autorest/autorest/to"
@@ -98,7 +98,7 @@ func (az *Cloud) ListVirtualMachines(resourceGroup string) ([]compute.VirtualMac
klog.Errorf("VirtualMachinesClient.List(%v) failure with err=%v", resourceGroup, rerr)
return nil, rerr.Error()
}
- klog.V(2).Infof("VirtualMachinesClient.List(%v) success", resourceGroup)
+ klog.V(6).Infof("VirtualMachinesClient.List(%v) success", resourceGroup)
return allNodes, nil
}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_config.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_config.go
index 4b0af9e7e542..6711c5f65169 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_config.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_config.go
@@ -39,7 +39,7 @@ const (
)
// InitializeCloudFromSecret initializes Azure cloud provider from Kubernetes secret.
-func (az *Cloud) InitializeCloudFromSecret() error {
+func (az *Cloud) InitializeCloudFromSecret(ctx context.Context) error {
config, err := az.GetConfigFromSecret()
if err != nil {
klog.Errorf("Failed to get cloud-config from secret: %v", err)
@@ -51,7 +51,7 @@ func (az *Cloud) InitializeCloudFromSecret() error {
return nil
}
- if err := az.InitializeCloudFromConfig(config, true, true); err != nil {
+ if err := az.InitializeCloudFromConfig(ctx, config, true, true); err != nil {
klog.Errorf("Failed to initialize Azure cloud provider: %v", err)
return fmt.Errorf("InitializeCloudFromSecret: failed to initialize Azure cloud provider: %w", err)
}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go
index 34baafc9ec47..883e30e0c423 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go
@@ -27,7 +27,7 @@ import (
"sync"
"time"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"k8s.io/apimachinery/pkg/types"
kwait "k8s.io/apimachinery/pkg/util/wait"
@@ -113,29 +113,39 @@ type ExtendedLocation struct {
// getNodeVMSet gets the VMSet interface based on config.VMType and the real virtual machine type.
func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName, crt azcache.AzureCacheReadType) (VMSet, error) {
- // 1. vmType is standard, return cloud.VMSet directly.
- if c.cloud.VMType == consts.VMTypeStandard {
+ // 1. vmType is standard or vmssflex, return cloud.VMSet directly.
+ // 1.1 all the nodes in the cluster are avset nodes.
+ // 1.2 all the nodes in the cluster are vmssflex nodes.
+ if c.cloud.VMType == consts.VMTypeStandard || c.cloud.VMType == consts.VMTypeVmssFlex {
return c.cloud.VMSet, nil
}
// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to ScaleSet.
+ // 2.1 all the nodes in the cluster are vmss uniform nodes.
+ // 2.2 mixed nodes: the cluster can contain any mix of avset nodes, vmss uniform nodes and vmssflex nodes.
ss, ok := c.cloud.VMSet.(*ScaleSet)
if !ok {
return nil, fmt.Errorf("error of converting vmSet (%q) to ScaleSet with vmType %q", c.cloud.VMSet, c.cloud.VMType)
}
- // 3. If the node is managed by availability set, then return ss.availabilitySet.
- managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName), crt)
+ vmManagementType, err := ss.getVMManagementTypeByNodeName(mapNodeNameToVMName(nodeName), crt)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("getNodeVMSet: failed to check the node %s management type: %w", mapNodeNameToVMName(nodeName), err)
}
- if managedByAS {
+ // 3. If the node is managed by availability set, then return ss.availabilitySet.
+ if vmManagementType == ManagedByAvSet {
// vm is managed by availability set.
return ss.availabilitySet, nil
}
+ if vmManagementType == ManagedByVmssFlex {
+ // 4. If the node is managed by vmss flex, then return ss.flexScaleSet.
+ // vm is managed by vmss flex.
+ return ss.flexScaleSet, nil
+ }
- // 4. Node is managed by vmss
+ // 5. Node is managed by vmss
return ss, nil
}
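A self-contained sketch of the dispatch described by the renumbered comments above; the type and constant names are simplified stand-ins for the provider's unexported ones:

package main

import "fmt"

// vmManagementType mirrors the three cases the new dispatch handles.
type vmManagementType int

const (
	managedByAvSet vmManagementType = iota
	managedByVmssUniform
	managedByVmssFlex
)

type vmSet interface{ Name() string }

type named string

func (n named) Name() string { return string(n) }

// pickVMSet sketches the per-node routing: avset nodes go to the
// availability-set backend, flex nodes to the flex backend, and
// everything else stays on the uniform scale-set backend.
func pickVMSet(t vmManagementType, avset, flex, uniform vmSet) vmSet {
	switch t {
	case managedByAvSet:
		return avset
	case managedByVmssFlex:
		return flex
	default:
		return uniform
	}
}

func main() {
	fmt.Println(pickVMSet(managedByVmssFlex, named("avset"), named("flex"), named("uniform")).Name())
}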
// AttachDisk attaches a disk to vm
@@ -189,8 +199,8 @@ func (c *controllerCommon) AttachDisk(ctx context.Context, async bool, diskName,
diskEncryptionSetID = *disk.DiskProperties.Encryption.DiskEncryptionSetID
}
- if disk.DiskProperties.DiskState != compute.DiskStateUnattached && (disk.MaxShares == nil || *disk.MaxShares <= 1) {
- return -1, fmt.Errorf("state of disk(%s) is %s, not in expected %s state", diskURI, disk.DiskProperties.DiskState, compute.DiskStateUnattached)
+ if disk.DiskProperties.DiskState != compute.Unattached && (disk.MaxShares == nil || *disk.MaxShares <= 1) {
+ return -1, fmt.Errorf("state of disk(%s) is %s, not in expected %s state", diskURI, disk.DiskProperties.DiskState, compute.Unattached)
}
}
@@ -257,11 +267,7 @@ func (c *controllerCommon) AttachDisk(ctx context.Context, async bool, diskName,
klog.Warningf("azureDisk - switch to batch operation due to rate limited, QPS: %f", c.diskOpRateLimiter.QPS())
}
}
- resourceGroup, _, err := getInfoFromDiskURI(diskURI)
- if err != nil {
- return -1, err
- }
- return lun, vmset.WaitForUpdateResult(ctx, future, resourceGroup, "attach_disk")
+ return lun, vmset.WaitForUpdateResult(ctx, future, nodeName, "attach_disk")
}
func (c *controllerCommon) insertAttachDiskRequest(diskURI, nodeName string, options *AttachDiskOptions) error {
@@ -353,7 +359,7 @@ func (c *controllerCommon) DetachDisk(ctx context.Context, diskName, diskURI str
} else {
lun, _, errGetLun := c.GetDiskLun(diskName, diskURI, nodeName)
if errGetLun == nil || !strings.Contains(errGetLun.Error(), consts.CannotFindDiskLUN) {
- return fmt.Errorf("disk(%s) is still attatched to node(%s) on lun(%d), error: %v", diskURI, nodeName, lun, errGetLun)
+ return fmt.Errorf("disk(%s) is still attached to node(%s) on lun(%d), error: %w", diskURI, nodeName, lun, errGetLun)
}
}
@@ -617,7 +623,7 @@ func (c *controllerCommon) checkDiskExists(ctx context.Context, diskURI string)
func getValidCreationData(subscriptionID, resourceGroup, sourceResourceID, sourceType string) (compute.CreationData, error) {
if sourceResourceID == "" {
return compute.CreationData{
- CreateOption: compute.DiskCreateOptionEmpty,
+ CreateOption: compute.Empty,
}, nil
}
@@ -633,7 +639,7 @@ func getValidCreationData(subscriptionID, resourceGroup, sourceResourceID, sourc
}
default:
return compute.CreationData{
- CreateOption: compute.DiskCreateOptionEmpty,
+ CreateOption: compute.Empty,
}, nil
}
@@ -645,7 +651,7 @@ func getValidCreationData(subscriptionID, resourceGroup, sourceResourceID, sourc
return compute.CreationData{}, fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, managedDiskPathRE)
}
return compute.CreationData{
- CreateOption: compute.DiskCreateOptionCopy,
+ CreateOption: compute.Copy,
SourceResourceID: &sourceResourceID,
}, nil
}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go
index 7e4b3537dfde..16ae72ec9840 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go
@@ -18,10 +18,11 @@ package provider
import (
"context"
+ "fmt"
"net/http"
"strings"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/to"
@@ -53,13 +54,17 @@ func (as *availabilitySet) AttachDisk(ctx context.Context, nodeName types.NodeNa
opt := v
attached := false
for _, disk := range *vm.StorageProfile.DataDisks {
- if disk.ManagedDisk != nil && strings.EqualFold(*disk.ManagedDisk.ID, diskURI) {
- attached = true
- break
+ if disk.ManagedDisk != nil && strings.EqualFold(*disk.ManagedDisk.ID, diskURI) && disk.Lun != nil {
+ if *disk.Lun == opt.lun {
+ attached = true
+ break
+ } else {
+ return nil, fmt.Errorf("disk(%s) already attached to node(%s) on LUN(%d), but target LUN is %d", diskURI, nodeName, *disk.Lun, opt.lun)
+ }
}
}
if attached {
- klog.V(2).Infof("azureDisk - disk(%s) already attached to node(%s)", diskURI, nodeName)
+ klog.V(2).Infof("azureDisk - disk(%s) already attached to node(%s) on LUN(%d)", diskURI, nodeName, opt.lun)
continue
}
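The guard above replaces a blanket "already attached" skip with a LUN-aware check: the same disk at the requested LUN is a no-op, while the same disk at a different LUN is now a hard error. A standalone sketch of that logic, with dataDisk as a pared-down stand-in for compute.DataDisk:

package main

import (
	"fmt"
	"strings"
)

// dataDisk carries just the fields the check inspects.
type dataDisk struct {
	ManagedDiskID *string
	Lun           *int32
}

// checkAttached reports whether the disk is already attached at the
// requested LUN, and errors out if it sits on a different LUN.
func checkAttached(disks []dataDisk, diskURI string, wantLun int32) (bool, error) {
	for _, d := range disks {
		if d.ManagedDiskID != nil && strings.EqualFold(*d.ManagedDiskID, diskURI) && d.Lun != nil {
			if *d.Lun == wantLun {
				return true, nil
			}
			return false, fmt.Errorf("disk(%s) already attached on LUN(%d), but target LUN is %d", diskURI, *d.Lun, wantLun)
		}
	}
	return false, nil
}

func main() {
	uri := "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/disks/d1"
	lun := int32(0)
	disks := []dataDisk{{ManagedDiskID: &uri, Lun: &lun}}
	fmt.Println(checkAttached(disks, uri, 1))
}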
@@ -97,7 +102,7 @@ func (as *availabilitySet) AttachDisk(ctx context.Context, nodeName types.NodeNa
klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk list(%s)", nodeResourceGroup, vmName, diskMap)
// Invalidate the cache right after updating
defer func() {
- _ = as.cloud.vmCache.Delete(vmName)
+ _ = as.DeleteCacheForNode(vmName)
}()
future, rerr := as.VirtualMachinesClient.UpdateAsync(ctx, nodeResourceGroup, vmName, newVM, "attach_disk")
@@ -118,9 +123,20 @@ func (as *availabilitySet) AttachDisk(ctx context.Context, nodeName types.NodeNa
return future, nil
}
+func (as *availabilitySet) DeleteCacheForNode(nodeName string) error {
+ _ = as.cloud.vmCache.Delete(nodeName)
+ return nil
+}
+
// WaitForUpdateResult waits for the response of the update request
-func (as *availabilitySet) WaitForUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName, source string) error {
- if rerr := as.VirtualMachinesClient.WaitForUpdateResult(ctx, future, resourceGroupName, source); rerr != nil {
+func (as *availabilitySet) WaitForUpdateResult(ctx context.Context, future *azure.Future, nodeName types.NodeName, source string) error {
+ vmName := mapNodeNameToVMName(nodeName)
+ nodeResourceGroup, err := as.GetNodeResourceGroup(vmName)
+ if err != nil {
+ return err
+ }
+
+ if rerr := as.VirtualMachinesClient.WaitForUpdateResult(ctx, future, nodeResourceGroup, source); rerr != nil {
return rerr.Error()
}
return nil
@@ -184,7 +200,7 @@ func (as *availabilitySet) DetachDisk(ctx context.Context, nodeName types.NodeNa
klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk list(%s)", nodeResourceGroup, vmName, nodeName, diskMap)
// Invalidate the cache right after updating
defer func() {
- _ = as.cloud.vmCache.Delete(vmName)
+ _ = as.DeleteCacheForNode(vmName)
}()
rerr := as.VirtualMachinesClient.Update(ctx, nodeResourceGroup, vmName, newVM, "detach_disk")
@@ -215,7 +231,7 @@ func (as *availabilitySet) UpdateVM(ctx context.Context, nodeName types.NodeName
klog.V(2).Infof("azureDisk - update(%s): vm(%s)", nodeResourceGroup, vmName)
// Invalidate the cache right after updating
defer func() {
- _ = as.cloud.vmCache.Delete(vmName)
+ _ = as.DeleteCacheForNode(vmName)
}()
rerr := as.VirtualMachinesClient.Update(ctx, nodeResourceGroup, vmName, compute.VirtualMachineUpdate{}, "update_vm")
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go
index a8f01c1e1fe1..922bae588425 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go
@@ -18,10 +18,11 @@ package provider
import (
"context"
+ "fmt"
"net/http"
"strings"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/to"
@@ -30,6 +31,7 @@ import (
azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+ "sigs.k8s.io/cloud-provider-azure/pkg/retry"
)
// AttachDisk attaches a disk to vm
@@ -59,13 +61,17 @@ func (ss *ScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, dis
opt := v
attached := false
for _, disk := range *storageProfile.DataDisks {
- if disk.ManagedDisk != nil && strings.EqualFold(*disk.ManagedDisk.ID, diskURI) {
- attached = true
- break
+ if disk.ManagedDisk != nil && strings.EqualFold(*disk.ManagedDisk.ID, diskURI) && disk.Lun != nil {
+ if *disk.Lun == opt.lun {
+ attached = true
+ break
+ } else {
+ return nil, fmt.Errorf("disk(%s) already attached to node(%s) on LUN(%d), but target LUN is %d", diskURI, nodeName, *disk.Lun, opt.lun)
+ }
}
}
if attached {
- klog.V(2).Infof("azureDisk - disk(%s) already attached to node(%s)", diskURI, nodeName)
+ klog.V(2).Infof("azureDisk - disk(%s) already attached to node(%s) on LUN(%d)", diskURI, nodeName, opt.lun)
continue
}
@@ -103,7 +109,7 @@ func (ss *ScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, dis
// Invalidate the cache right after updating
defer func() {
- _ = ss.deleteCacheForNode(vmName)
+ _ = ss.DeleteCacheForNode(vmName)
}()
klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk list(%s)", nodeResourceGroup, nodeName, diskMap)
@@ -126,8 +132,28 @@ func (ss *ScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, dis
}
// WaitForUpdateResult waits for the response of the update request
-func (ss *ScaleSet) WaitForUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName, source string) error {
- if rerr := ss.VirtualMachineScaleSetVMsClient.WaitForUpdateResult(ctx, future, resourceGroupName, source); rerr != nil {
+func (ss *ScaleSet) WaitForUpdateResult(ctx context.Context, future *azure.Future, nodeName types.NodeName, source string) error {
+ vmName := mapNodeNameToVMName(nodeName)
+ nodeResourceGroup, err := ss.GetNodeResourceGroup(vmName)
+ if err != nil {
+ return err
+ }
+
+ var result *compute.VirtualMachineScaleSetVM
+ var rerr *retry.Error
+ defer func() {
+ if rerr == nil && result != nil && result.VirtualMachineScaleSetVMProperties != nil {
+ // If we have an updated result, we update the vmss vm cache
+ vm, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault)
+ if err != nil {
+ return
+ }
+ _ = ss.updateCache(vmName, nodeResourceGroup, vm.VMSSName, vm.InstanceID, result)
+ }
+ }()
+
+ result, rerr = ss.VirtualMachineScaleSetVMsClient.WaitForUpdateResult(ctx, future, nodeResourceGroup, source)
+ if rerr != nil {
return rerr.Error()
}
return nil
@@ -147,10 +173,13 @@ func (ss *ScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, dis
}
var disks []compute.DataDisk
- storageProfile := vm.VirtualMachineScaleSetVMProperties.StorageProfile
- if storageProfile != nil && storageProfile.DataDisks != nil {
- disks = make([]compute.DataDisk, len(*storageProfile.DataDisks))
- copy(disks, *storageProfile.DataDisks)
+
+ if vm != nil && vm.VirtualMachineScaleSetVMProperties != nil {
+ storageProfile := vm.VirtualMachineScaleSetVMProperties.StorageProfile
+ if storageProfile != nil && storageProfile.DataDisks != nil {
+ disks = make([]compute.DataDisk, len(*storageProfile.DataDisks))
+ copy(disks, *storageProfile.DataDisks)
+ }
}
bFoundDisk := false
for i, disk := range disks {
@@ -189,13 +218,31 @@ func (ss *ScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, dis
},
},
}
- // Invalidate the cache right after updating
+
+ var updateResult *compute.VirtualMachineScaleSetVM
+ var rerr *retry.Error
+
defer func() {
- _ = ss.deleteCacheForNode(vmName)
+ // If there is an error with Update operation,
+ // invalidate the cache
+ if rerr != nil {
+ _ = ss.DeleteCacheForNode(vmName)
+ return
+ }
+
+ // Update the cache with the updated result only if it's not nil
+ // and contains the VirtualMachineScaleSetVMProperties
+ if updateResult != nil && updateResult.VirtualMachineScaleSetVMProperties != nil {
+ if err := ss.updateCache(vmName, nodeResourceGroup, vm.VMSSName, vm.InstanceID, updateResult); err != nil {
+ klog.Errorf("updateCache(%s, %s, %s, %s) failed with error: %v", vmName, nodeResourceGroup, vm.VMSSName, vm.InstanceID, err)
+ // if an error occurs while updating the cache, invalidate it
+ _ = ss.DeleteCacheForNode(vmName)
+ }
+ }
}()
klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk list(%s)", nodeResourceGroup, nodeName, diskMap)
- rerr := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, vm.VMSSName, vm.InstanceID, newVM,
+ updateResult, rerr = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, vm.VMSSName, vm.InstanceID, newVM,
"detach_disk")
if rerr != nil {
klog.Errorf("azureDisk - detach disk list(%s) on rg(%s) vm(%s) failed, err: %v", diskMap, nodeResourceGroup, nodeName, rerr)
@@ -203,7 +250,7 @@ func (ss *ScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, dis
klog.Errorf("azureDisk - begin to filterNonExistingDisks(%v) on rg(%s) vm(%s)", diskMap, nodeResourceGroup, nodeName)
disks := ss.filterNonExistingDisks(ctx, *newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks)
newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks = &disks
- rerr = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, vm.VMSSName, vm.InstanceID, newVM, "detach_disk")
+ updateResult, rerr = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, vm.VMSSName, vm.InstanceID, newVM, "detach_disk")
}
}
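The defer blocks above move from unconditional invalidation to a write-back-on-success policy: invalidate on a failed update, refresh the cache from the returned VM on success, and fall back to invalidation if the refresh itself fails. A toy sketch of that discipline (all names here are illustrative):

package main

import (
	"errors"
	"fmt"
)

type cache struct{ entries map[string]string }

func (c *cache) delete(k string) { delete(c.entries, k) }

func (c *cache) update(k, v string) error {
	if v == "" {
		return errors.New("empty result")
	}
	c.entries[k] = v
	return nil
}

// updateVM runs the remote call, then applies the cache discipline:
// drop the stale entry on error, write back the fresh result on
// success, and drop the entry if the write-back fails.
func updateVM(c *cache, name string, call func() (string, error)) error {
	var result string
	var err error
	defer func() {
		if err != nil {
			c.delete(name)
			return
		}
		if result != "" {
			if uerr := c.update(name, result); uerr != nil {
				c.delete(name)
			}
		}
	}()
	result, err = call()
	return err
}

func main() {
	c := &cache{entries: map[string]string{"vm0": "stale"}}
	_ = updateVM(c, "vm0", func() (string, error) { return "fresh", nil })
	fmt.Println(c.entries["vm0"])
}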
@@ -227,13 +274,30 @@ func (ss *ScaleSet) UpdateVM(ctx context.Context, nodeName types.NodeName) error
return err
}
- // Invalidate the cache right after updating
+ var updateResult *compute.VirtualMachineScaleSetVM
+ var rerr *retry.Error
+
defer func() {
- _ = ss.deleteCacheForNode(vmName)
+ // If there is an error with Update operation,
+ // invalidate the cache
+ if rerr != nil {
+ _ = ss.DeleteCacheForNode(vmName)
+ return
+ }
+
+ // Update the cache with the updated result only if it's not nil
+ // and contains the VirtualMachineScaleSetVMProperties
+ if updateResult != nil && updateResult.VirtualMachineScaleSetVMProperties != nil {
+ if err := ss.updateCache(vmName, nodeResourceGroup, vm.VMSSName, vm.InstanceID, updateResult); err != nil {
+ klog.Errorf("updateCache(%s, %s, %s, %s) failed with error: %v", vmName, nodeResourceGroup, vm.VMSSName, vm.InstanceID, err)
+ // if an error occurs while updating the cache, invalidate it
+ _ = ss.DeleteCacheForNode(vmName)
+ }
+ }
}()
klog.V(2).Infof("azureDisk - update(%s): vm(%s)", nodeResourceGroup, nodeName)
- rerr := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, vm.VMSSName, vm.InstanceID, compute.VirtualMachineScaleSetVM{}, "update_vmss_instance")
+ updateResult, rerr = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, vm.VMSSName, vm.InstanceID, compute.VirtualMachineScaleSetVM{}, "update_vmss_instance")
klog.V(2).Infof("azureDisk - update(%s): vm(%s) - returned with %v", nodeResourceGroup, nodeName, rerr)
if rerr != nil {
@@ -249,11 +313,15 @@ func (ss *ScaleSet) GetDataDisks(nodeName types.NodeName, crt azcache.AzureCache
return nil, nil, err
}
- storageProfile := vm.AsVirtualMachineScaleSetVM().StorageProfile
+ if vm != nil && vm.AsVirtualMachineScaleSetVM() != nil && vm.AsVirtualMachineScaleSetVM().VirtualMachineScaleSetVMProperties != nil {
+ storageProfile := vm.AsVirtualMachineScaleSetVM().StorageProfile
+
+ if storageProfile == nil || storageProfile.DataDisks == nil {
+ return nil, nil, nil
+ }
- if storageProfile == nil || storageProfile.DataDisks == nil {
- return nil, nil, nil
+ return *storageProfile.DataDisks, vm.AsVirtualMachineScaleSetVM().ProvisioningState, nil
}
- return *storageProfile.DataDisks, vm.AsVirtualMachineScaleSetVM().ProvisioningState, nil
+ return nil, nil, nil
}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go
new file mode 100644
index 000000000000..e8cab8553230
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go
@@ -0,0 +1,259 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "strings"
+
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/to"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/klog/v2"
+
+ azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
+ "sigs.k8s.io/cloud-provider-azure/pkg/consts"
+)
+
+// AttachDisk attaches a disk to vm
+func (fs *FlexScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) (*azure.Future, error) {
+ vmName := mapNodeNameToVMName(nodeName)
+ vm, err := fs.getVmssFlexVM(vmName, azcache.CacheReadTypeDefault)
+ if err != nil {
+ return nil, err
+ }
+
+ nodeResourceGroup, err := fs.GetNodeResourceGroup(vmName)
+ if err != nil {
+ return nil, err
+ }
+
+ disks := make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks))
+ copy(disks, *vm.StorageProfile.DataDisks)
+
+ for k, v := range diskMap {
+ diskURI := k
+ opt := v
+ attached := false
+ for _, disk := range *vm.StorageProfile.DataDisks {
+ if disk.ManagedDisk != nil && strings.EqualFold(*disk.ManagedDisk.ID, diskURI) && disk.Lun != nil {
+ if *disk.Lun == opt.lun {
+ attached = true
+ break
+ } else {
+ return nil, fmt.Errorf("disk(%s) already attached to node(%s) on LUN(%d), but target LUN is %d", diskURI, nodeName, *disk.Lun, opt.lun)
+ }
+ }
+ }
+ if attached {
+ klog.V(2).Infof("azureDisk - disk(%s) already attached to node(%s) on LUN(%d)", diskURI, nodeName, opt.lun)
+ continue
+ }
+
+ managedDisk := &compute.ManagedDiskParameters{ID: &diskURI}
+ if opt.diskEncryptionSetID == "" {
+ if vm.StorageProfile.OsDisk != nil &&
+ vm.StorageProfile.OsDisk.ManagedDisk != nil &&
+ vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet != nil &&
+ vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID != nil {
+ // set diskEncryptionSet as value of os disk by default
+ opt.diskEncryptionSetID = *vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID
+ }
+ }
+ if opt.diskEncryptionSetID != "" {
+ managedDisk.DiskEncryptionSet = &compute.DiskEncryptionSetParameters{ID: &opt.diskEncryptionSetID}
+ }
+ disks = append(disks,
+ compute.DataDisk{
+ Name: &opt.diskName,
+ Lun: &opt.lun,
+ Caching: opt.cachingMode,
+ CreateOption: "attach",
+ ManagedDisk: managedDisk,
+ WriteAcceleratorEnabled: to.BoolPtr(opt.writeAcceleratorEnabled),
+ })
+ }
+
+ newVM := compute.VirtualMachineUpdate{
+ VirtualMachineProperties: &compute.VirtualMachineProperties{
+ StorageProfile: &compute.StorageProfile{
+ DataDisks: &disks,
+ },
+ },
+ }
+
+ defer func() {
+ _ = fs.DeleteCacheForNode(vmName)
+ }()
+
+ klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk list(%s)", nodeResourceGroup, vmName, diskMap)
+
+ future, rerr := fs.VirtualMachinesClient.UpdateAsync(ctx, nodeResourceGroup, *vm.Name, newVM, "attach_disk")
+ if rerr != nil {
+ klog.Errorf("azureDisk - attach disk list(%s) on rg(%s) vm(%s) failed, err: %v", diskMap, nodeResourceGroup, vmName, rerr)
+ if rerr.HTTPStatusCode == http.StatusNotFound {
+ klog.Errorf("azureDisk - begin to filterNonExistingDisks(%v) on rg(%s) vm(%s)", diskMap, nodeResourceGroup, vmName)
+ disks := fs.filterNonExistingDisks(ctx, *newVM.VirtualMachineProperties.StorageProfile.DataDisks)
+ newVM.VirtualMachineProperties.StorageProfile.DataDisks = &disks
+ future, rerr = fs.VirtualMachinesClient.UpdateAsync(ctx, nodeResourceGroup, *vm.Name, newVM, "attach_disk")
+ }
+ }
+
+ klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk list(%s) returned with %v", nodeResourceGroup, vmName, diskMap, rerr)
+ if rerr != nil {
+ return future, rerr.Error()
+ }
+ return future, nil
+}
+
+// DetachDisk detaches a disk from VM
+func (fs *FlexScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string) error {
+ vmName := mapNodeNameToVMName(nodeName)
+ vm, err := fs.getVmssFlexVM(vmName, azcache.CacheReadTypeDefault)
+ if err != nil {
+ // if host doesn't exist, no need to detach
+ klog.Warningf("azureDisk - cannot find node %s, skip detaching disk list(%s)", nodeName, diskMap)
+ return nil
+ }
+
+ nodeResourceGroup, err := fs.GetNodeResourceGroup(vmName)
+ if err != nil {
+ return err
+ }
+
+ disks := make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks))
+ copy(disks, *vm.StorageProfile.DataDisks)
+
+ bFoundDisk := false
+ for i, disk := range disks {
+ for diskURI, diskName := range diskMap {
+ if disk.Lun != nil && (disk.Name != nil && diskName != "" && strings.EqualFold(*disk.Name, diskName)) ||
+ (disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && strings.EqualFold(*disk.Vhd.URI, diskURI)) ||
+ (disk.ManagedDisk != nil && diskURI != "" && strings.EqualFold(*disk.ManagedDisk.ID, diskURI)) {
+ // found the disk
+ klog.V(2).Infof("azureDisk - detach disk: name %s uri %s", diskName, diskURI)
+ disks[i].ToBeDetached = to.BoolPtr(true)
+ bFoundDisk = true
+ }
+ }
+ }
+
+ if !bFoundDisk {
+ // only log here, next action is to update VM status with original meta data
+ klog.Errorf("detach azure disk on node(%s): disk list(%s) not found", nodeName, diskMap)
+ } else {
+ if strings.EqualFold(fs.cloud.Environment.Name, consts.AzureStackCloudName) && !fs.Config.DisableAzureStackCloud {
+ // Azure stack does not support ToBeDetached flag, use original way to detach disk
+ newDisks := []compute.DataDisk{}
+ for _, disk := range disks {
+ if !to.Bool(disk.ToBeDetached) {
+ newDisks = append(newDisks, disk)
+ }
+ }
+ disks = newDisks
+ }
+ }
+
+ newVM := compute.VirtualMachineUpdate{
+ VirtualMachineProperties: &compute.VirtualMachineProperties{
+ StorageProfile: &compute.StorageProfile{
+ DataDisks: &disks,
+ },
+ },
+ }
+
+ defer func() {
+ _ = fs.DeleteCacheForNode(vmName)
+ }()
+
+ klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk list(%s)", nodeResourceGroup, vmName, nodeName, diskMap)
+
+ rerr := fs.VirtualMachinesClient.Update(ctx, nodeResourceGroup, *vm.Name, newVM, "detach_disk")
+ if rerr != nil {
+ klog.Errorf("azureDisk - detach disk list(%s) on rg(%s) vm(%s) failed, err: %v", diskMap, nodeResourceGroup, vmName, rerr)
+ if rerr.HTTPStatusCode == http.StatusNotFound {
+ klog.Errorf("azureDisk - begin to filterNonExistingDisks(%v) on rg(%s) vm(%s)", diskMap, nodeResourceGroup, vmName)
+ disks := fs.filterNonExistingDisks(ctx, *vm.StorageProfile.DataDisks)
+ newVM.VirtualMachineProperties.StorageProfile.DataDisks = &disks
+ rerr = fs.VirtualMachinesClient.Update(ctx, nodeResourceGroup, *vm.Name, newVM, "detach_disk")
+ }
+ }
+
+ klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk list(%s) returned with %v", nodeResourceGroup, vmName, diskMap, rerr)
+ if rerr != nil {
+ return rerr.Error()
+ }
+ return nil
+}
+
+// WaitForUpdateResult waits for the response of the update request
+func (fs *FlexScaleSet) WaitForUpdateResult(ctx context.Context, future *azure.Future, nodeName types.NodeName, source string) error {
+ vmName := mapNodeNameToVMName(nodeName)
+ nodeResourceGroup, err := fs.GetNodeResourceGroup(vmName)
+ if err != nil {
+ return err
+ }
+ if rerr := fs.VirtualMachinesClient.WaitForUpdateResult(ctx, future, nodeResourceGroup, source); rerr != nil {
+ return rerr.Error()
+ }
+ return nil
+}
+
+// UpdateVM updates a vm
+func (fs *FlexScaleSet) UpdateVM(ctx context.Context, nodeName types.NodeName) error {
+ vmName := mapNodeNameToVMName(nodeName)
+ vm, err := fs.getVmssFlexVM(vmName, azcache.CacheReadTypeDefault)
+ if err != nil {
+ // if host doesn't exist, no need to update
+ klog.Warningf("azureDisk - cannot find node %s, skip updating vm)", nodeName)
+ return nil
+ }
+ nodeResourceGroup, err := fs.GetNodeResourceGroup(vmName)
+ if err != nil {
+ return err
+ }
+
+ defer func() {
+ _ = fs.DeleteCacheForNode(vmName)
+ }()
+
+ klog.V(2).Infof("azureDisk - update(%s): vm(%s)", nodeResourceGroup, vmName)
+
+ rerr := fs.VirtualMachinesClient.Update(ctx, nodeResourceGroup, *vm.Name, compute.VirtualMachineUpdate{}, "update_vm")
+ klog.V(2).Infof("azureDisk - update(%s): vm(%s) - returned with %v", nodeResourceGroup, vmName, rerr)
+ if rerr != nil {
+ return rerr.Error()
+ }
+ return nil
+}
+
+// GetDataDisks gets a list of data disks attached to the node.
+func (fs *FlexScaleSet) GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]compute.DataDisk, *string, error) {
+ vm, err := fs.getVmssFlexVM(string(nodeName), crt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if vm.StorageProfile.DataDisks == nil {
+ return nil, nil, nil
+ }
+
+ return *vm.StorageProfile.DataDisks, vm.ProvisioningState, nil
+}
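The FlexScaleSet entry points above are batch-oriented: callers pass a map keyed by disk URI so one VM update can carry several attach or detach operations. A sketch of building such a map, with attachOptions as a simplified stand-in for the provider's AttachDiskOptions:

package main

import "fmt"

// attachOptions carries a subset of AttachDiskOptions; the real struct
// also holds caching mode, write-accelerator, and encryption-set settings.
type attachOptions struct {
	diskName string
	lun      int32
}

func main() {
	// One VM update can attach both disks in a single API call.
	diskMap := map[string]*attachOptions{
		"/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/disks/d1": {diskName: "d1", lun: 0},
		"/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/disks/d2": {diskName: "d2", lun: 1},
	}
	for uri, opt := range diskMap {
		fmt.Printf("attach %s at LUN %d (%s)\n", opt.diskName, opt.lun, uri)
	}
}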
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go
index 1ef726c56def..8c849dd162fc 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go
@@ -17,14 +17,16 @@ limitations under the License.
package provider
import (
+ "context"
"fmt"
+ "sigs.k8s.io/cloud-provider-azure/pkg/provider/config"
+
"github.com/golang/mock/gomock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
- "sigs.k8s.io/cloud-provider-azure/pkg/auth"
"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient"
"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/interfaceclient/mockinterfaceclient"
"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/mockloadbalancerclient"
@@ -52,7 +54,7 @@ func NewTestScaleSet(ctrl *gomock.Controller) (*ScaleSet, error) {
func newTestScaleSetWithState(ctrl *gomock.Controller) (*ScaleSet, error) {
cloud := GetTestCloud(ctrl)
- ss, err := newScaleSet(cloud)
+ ss, err := newScaleSet(context.Background(), cloud)
if err != nil {
return nil, err
}
@@ -60,11 +62,21 @@ func newTestScaleSetWithState(ctrl *gomock.Controller) (*ScaleSet, error) {
return ss.(*ScaleSet), nil
}
+func NewTestFlexScaleSet(ctrl *gomock.Controller) (*FlexScaleSet, error) {
+ cloud := GetTestCloud(ctrl)
+ fs, err := newFlexScaleSet(context.Background(), cloud)
+ if err != nil {
+ return nil, err
+ }
+
+ return fs.(*FlexScaleSet), nil
+}
+
// GetTestCloud returns a fake azure cloud for unit tests in Azure related CSI drivers
func GetTestCloud(ctrl *gomock.Controller) (az *Cloud) {
az = &Cloud{
Config: Config{
- AzureAuthConfig: auth.AzureAuthConfig{
+ AzureAuthConfig: config.AzureAuthConfig{
TenantID: "tenant",
SubscriptionID: "subscription",
},
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_file.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_file.go
index f3046f2cc585..442d84066150 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_file.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_file.go
@@ -17,24 +17,27 @@ limitations under the License.
package provider
import (
- "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage"
+ "context"
+
+ "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient"
)
// create file share
-func (az *Cloud) createFileShare(resourceGroupName, accountName string, shareOptions *fileclient.ShareOptions) error {
- return az.FileClient.CreateFileShare(resourceGroupName, accountName, shareOptions)
+func (az *Cloud) createFileShare(ctx context.Context, subsID, resourceGroupName, accountName string, shareOptions *fileclient.ShareOptions) error {
+ _, err := az.FileClient.WithSubscriptionID(subsID).CreateFileShare(ctx, resourceGroupName, accountName, shareOptions, "")
+ return err
}
-func (az *Cloud) deleteFileShare(resourceGroupName, accountName, name string) error {
- return az.FileClient.DeleteFileShare(resourceGroupName, accountName, name)
+func (az *Cloud) deleteFileShare(ctx context.Context, subsID, resourceGroupName, accountName, name string) error {
+ return az.FileClient.WithSubscriptionID(subsID).DeleteFileShare(ctx, resourceGroupName, accountName, name, "")
}
-func (az *Cloud) resizeFileShare(resourceGroupName, accountName, name string, sizeGiB int) error {
- return az.FileClient.ResizeFileShare(resourceGroupName, accountName, name, sizeGiB)
+func (az *Cloud) resizeFileShare(ctx context.Context, subsID, resourceGroupName, accountName, name string, sizeGiB int) error {
+ return az.FileClient.WithSubscriptionID(subsID).ResizeFileShare(ctx, resourceGroupName, accountName, name, sizeGiB)
}
-func (az *Cloud) getFileShare(resourceGroupName, accountName, name string) (storage.FileShare, error) {
- return az.FileClient.GetFileShare(resourceGroupName, accountName, name)
+func (az *Cloud) getFileShare(ctx context.Context, subsID, resourceGroupName, accountName, name string) (storage.FileShare, error) {
+ return az.FileClient.WithSubscriptionID(subsID).GetFileShare(ctx, resourceGroupName, accountName, name, "")
}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instance_metadata.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instance_metadata.go
index 708804a19305..b403ff6d2208 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instance_metadata.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instance_metadata.go
@@ -23,6 +23,7 @@ import (
"net/http"
"k8s.io/klog/v2"
+
azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
"sigs.k8s.io/cloud-provider-azure/pkg/consts"
)
@@ -72,6 +73,7 @@ type ComputeMetadata struct {
ResourceGroup string `json:"resourceGroupName,omitempty"`
VMScaleSetName string `json:"vmScaleSetName,omitempty"`
SubscriptionID string `json:"subscriptionId,omitempty"`
+ ResourceID string `json:"resourceId,omitempty"`
}
// InstanceMetadata represents instance information.
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instances.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instances.go
index fd906a92e278..6a95eee054d0 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instances.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instances.go
@@ -28,7 +28,8 @@ import (
cloudprovider "k8s.io/cloud-provider"
"k8s.io/klog/v2"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
+
azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
"sigs.k8s.io/cloud-provider-azure/pkg/consts"
)
@@ -385,22 +386,16 @@ func (az *Cloud) getLocalInstanceProviderID(metadata *InstanceMetadata, nodeName
resourceGroup := strings.ToLower(metadata.Compute.ResourceGroup)
subscriptionID := strings.ToLower(metadata.Compute.SubscriptionID)
- // Compose instanceID based on nodeName for standard instance.
- if metadata.Compute.VMScaleSetName == "" {
- return az.getStandardMachineID(subscriptionID, resourceGroup, nodeName), nil
+ if metadata.Compute.ResourceID == "" {
+ // No ResourceID was returned by the instance metadata service; clean up the cache and report an error.
+ _ = az.Metadata.imsCache.Delete(consts.MetadataCacheKey)
+ return "", fmt.Errorf("got empty ResourceID from instance metadata service")
}
- // Get scale set name and instanceID from vmName for vmss.
- ssName, instanceID, err := extractVmssVMName(metadata.Compute.Name)
- if err != nil {
- if errors.Is(err, ErrorNotVmssInstance) {
- // Compose machineID for standard Node.
- return az.getStandardMachineID(subscriptionID, resourceGroup, nodeName), nil
- }
- return "", err
- }
- // Compose instanceID based on ssName and instanceID for vmss instance.
- return az.getVmssMachineID(subscriptionID, resourceGroup, ssName, instanceID), nil
+ providerID := strings.Replace(metadata.Compute.ResourceID, metadata.Compute.SubscriptionID, subscriptionID, -1)
+ providerID = strings.Replace(providerID, metadata.Compute.ResourceGroup, resourceGroup, -1)
+
+ return providerID, nil
}
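A standalone sketch of the replacement logic: the IMDS ResourceID is adopted verbatim as the provider ID, with only its subscription and resource-group segments normalized to lower case:

package main

import (
	"fmt"
	"strings"
)

// buildProviderID lower-cases the subscription and resource-group
// segments of the IMDS resource ID, leaving the rest untouched.
func buildProviderID(resourceID, subscriptionID, resourceGroup string) string {
	id := strings.Replace(resourceID, subscriptionID, strings.ToLower(subscriptionID), -1)
	return strings.Replace(id, resourceGroup, strings.ToLower(resourceGroup), -1)
}

func main() {
	fmt.Println(buildProviderID(
		"/subscriptions/SUB-ID/resourceGroups/MC_RG/providers/Microsoft.Compute/virtualMachines/vm-0",
		"SUB-ID", "MC_RG"))
}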
// InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID
@@ -433,7 +428,7 @@ func (az *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string
// InstanceType returns the type of the specified instance.
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
// (Implementer Note): This is used by kubelet. Kubelet will label the node. Real log from kubelet:
-// Adding node label from cloud provider: beta.kubernetes.io/instance-type=[value]
+// Adding node label from cloud provider: beta.kubernetes.io/instance-type=[value]
func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string, error) {
// Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them.
unmanaged, err := az.IsNodeUnmanaged(string(name))
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go
index 512e5734664d..05d62796bf6f 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go
@@ -19,8 +19,11 @@ package provider
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"math"
+ "net"
+ "net/netip"
"reflect"
"sort"
"strconv"
@@ -30,6 +33,7 @@ import (
"github.com/Azure/go-autorest/autorest/to"
v1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
cloudprovider "k8s.io/cloud-provider"
@@ -44,6 +48,36 @@ import (
"sigs.k8s.io/cloud-provider-azure/pkg/retry"
)
+// getServiceLoadBalancerIP retrieves LB IP from IPv4 annotation, then IPv6 annotation, then service.Spec.LoadBalancerIP.
+// TODO: Dual-stack support is not implemented.
+func getServiceLoadBalancerIP(service *v1.Service) string {
+ if service == nil {
+ return ""
+ }
+
+ if ip, ok := service.Annotations[consts.ServiceAnnotationLoadBalancerIPDualStack[false]]; ok && ip != "" {
+ return ip
+ }
+ if ip, ok := service.Annotations[consts.ServiceAnnotationLoadBalancerIPDualStack[true]]; ok && ip != "" {
+ return ip
+ }
+
+ // Retrieve LB IP from service.Spec.LoadBalancerIP (will be deprecated)
+ return service.Spec.LoadBalancerIP
+}
+
+// setServiceLoadBalancerIP sets LB IP to a Service
+func setServiceLoadBalancerIP(service *v1.Service, ip string) {
+ if service.Annotations == nil {
+ service.Annotations = map[string]string{}
+ }
+ if net.ParseIP(ip).To4() != nil {
+ service.Annotations[consts.ServiceAnnotationLoadBalancerIPDualStack[false]] = ip
+ return
+ }
+ service.Annotations[consts.ServiceAnnotationLoadBalancerIPDualStack[true]] = ip
+}
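A self-contained sketch of the getter/setter pair above. The annotation keys are written out as assumed values of consts.ServiceAnnotationLoadBalancerIPDualStack and may not match the real constants exactly:

package main

import (
	"fmt"
	"net"
)

// Placeholder keys standing in for the dual-stack annotation constants.
var lbIPAnnotation = map[bool]string{
	false: "service.beta.kubernetes.io/azure-load-balancer-ipv4",
	true:  "service.beta.kubernetes.io/azure-load-balancer-ipv6",
}

// pickLBIP mirrors the precedence: IPv4 annotation, then IPv6
// annotation, then the (deprecated) spec field.
func pickLBIP(annotations map[string]string, specLoadBalancerIP string) string {
	if ip := annotations[lbIPAnnotation[false]]; ip != "" {
		return ip
	}
	if ip := annotations[lbIPAnnotation[true]]; ip != "" {
		return ip
	}
	return specLoadBalancerIP
}

// storeLBIP mirrors the setter: the address family decides which
// annotation receives the value (To4() == nil means IPv6).
func storeLBIP(annotations map[string]string, ip string) {
	annotations[lbIPAnnotation[net.ParseIP(ip).To4() == nil]] = ip
}

func main() {
	ann := map[string]string{}
	storeLBIP(ann, "2001:db8::1")
	fmt.Println(pickLBIP(ann, "10.0.0.1"), ann)
}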
+
// GetLoadBalancer returns whether the specified load balancer and its components exist, and
// if so, what its status is.
func (az *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) {
@@ -51,7 +85,8 @@ func (az *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, servic
// there is a chance that we could orphan public IP resources while we delete the load balancer (kubernetes/kubernetes#80571).
// We need to make sure the existence of the load balancer depends on the load balancer resource and public IP resource on Azure.
existsPip := func() bool {
- pipName, _, err := az.determinePublicIPName(clusterName, service, nil)
+ var pips []network.PublicIPAddress
+ pipName, _, err := az.determinePublicIPName(clusterName, service, &pips)
if err != nil {
return false
}
@@ -63,11 +98,22 @@ func (az *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, servic
return existsPip
}()
- _, status, existsLb, err := az.getServiceLoadBalancer(service, clusterName, nil, false, []network.LoadBalancer{})
+ existingLBs, err := az.ListLB(service)
if err != nil {
return nil, existsPip, err
}
+ _, status, existsLb, err := az.getServiceLoadBalancer(service, clusterName, nil, false, existingLBs)
+ if err != nil || existsLb {
+ return status, existsLb || existsPip, err
+ }
+
+ flippedService := flipServiceInternalAnnotation(service)
+ _, status, existsLb, err = az.getServiceLoadBalancer(flippedService, clusterName, nil, false, existingLBs)
+ if err != nil || existsLb {
+ return status, existsLb || existsPip, err
+ }
+
// Return exists = false only if the load balancer and the public IP are not found on Azure
if !existsLb && !existsPip {
serviceName := getServiceName(service)
@@ -75,8 +121,8 @@ func (az *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, servic
return nil, false, nil
}
- // Return exists = true if either the load balancer or the public IP (or both) exists
- return status, true, nil
+ // Return exists = true if only the public IP exists
+ return nil, true, nil
}
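GetLoadBalancer now retries the lookup with the internal annotation inverted, so a service that recently switched between internal and public LBs is still found. A sketch of what the flip amounts to; the semantics are paraphrased from the usage here rather than copied from flipServiceInternalAnnotation:

package main

import "fmt"

// flip returns a copy of the annotations with the internal-LB flag
// inverted, so the lookup can be retried against the other LB type.
func flip(annotations map[string]string) map[string]string {
	out := map[string]string{}
	for k, v := range annotations {
		out[k] = v
	}
	const key = "service.beta.kubernetes.io/azure-load-balancer-internal"
	if out[key] == "true" {
		out[key] = "false"
	} else {
		out[key] = "true"
	}
	return out
}

func main() {
	svc := map[string]string{"service.beta.kubernetes.io/azure-load-balancer-internal": "true"}
	fmt.Println(flip(svc))
}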
func getPublicIPDomainNameLabel(service *v1.Service) (string, bool) {
@@ -95,18 +141,22 @@ func (az *Cloud) reconcileService(ctx context.Context, clusterName string, servi
return nil, err
}
- lbStatus, fipConfig, err := az.getServiceLoadBalancerStatus(service, lb, nil)
+ var pips []network.PublicIPAddress
+ lbStatus, fipConfig, err := az.getServiceLoadBalancerStatus(service, lb, &pips)
if err != nil {
klog.Errorf("getServiceLoadBalancerStatus(%s) failed: %v", serviceName, err)
- return nil, err
+ if !errors.Is(err, ErrorNotVmssInstance) {
+ return nil, err
+ }
}
var serviceIP *string
if lbStatus != nil && len(lbStatus.Ingress) > 0 {
serviceIP = &lbStatus.Ingress[0].IP
}
+
klog.V(2).Infof("reconcileService: reconciling security group for service %q with IP %q, wantLb = true", serviceName, logSafe(serviceIP))
- if _, err := az.reconcileSecurityGroup(clusterName, service, serviceIP, true /* wantLb */); err != nil {
+ if _, err := az.reconcileSecurityGroup(clusterName, service, serviceIP, lb.Name, true /* wantLb */); err != nil {
klog.Errorf("reconcileSecurityGroup(%s) failed: %#v", serviceName, err)
return nil, err
}
@@ -138,11 +188,16 @@ func (az *Cloud) reconcileService(ctx context.Context, clusterName string, servi
// EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer
func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
// When a client updates the internal load balancer annotation,
- // the service may be switched from an internal LB to a public one, or vise versa.
+ // the service may be switched from an internal LB to a public one, or vice versa.
// Here we'll firstly ensure service do not lie in the opposite LB.
+
+ // Serialize service reconcile process
+ az.serviceReconcileLock.Lock()
+ defer az.serviceReconcileLock.Unlock()
+
var err error
serviceName := getServiceName(service)
- mc := metrics.NewMetricContext("services", "ensure_loadbalancer", az.ResourceGroup, az.SubscriptionID, serviceName)
+ mc := metrics.NewMetricContext("services", "ensure_loadbalancer", az.ResourceGroup, az.getNetworkResourceSubscriptionID(), serviceName)
klog.V(5).InfoS("EnsureLoadBalancer Start", "service", serviceName, "cluster", clusterName, "service_spec", service)
isOperationSucceeded := false
@@ -160,11 +215,28 @@ func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, ser
return lbStatus, nil
}
+func (az *Cloud) getLatestService(service *v1.Service) (*v1.Service, bool, error) {
+ latestService, err := az.serviceLister.Services(service.Namespace).Get(service.Name)
+ switch {
+ case apierrors.IsNotFound(err):
+ // the service's absence from the store means its deletion was already observed by the watcher
+ return nil, false, nil
+ case err != nil:
+ return nil, false, err
+ default:
+ return latestService.DeepCopy(), true, nil
+ }
+}
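getLatestService gives callers a three-way outcome: deleted (nil, false, nil), transient lookup error, or a deep copy of the fresh object. A toy sketch of the same shape with a map-backed store standing in for the lister:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

type store map[string]string

func (s store) get(name string) (string, error) {
	v, ok := s[name]
	if !ok {
		return "", errNotFound
	}
	return v, nil
}

// latest distinguishes "deleted" from a transient error, so callers
// can skip reconciling services that are on their way out.
func latest(s store, name string) (string, bool, error) {
	v, err := s.get(name)
	switch {
	case errors.Is(err, errNotFound):
		return "", false, nil
	case err != nil:
		return "", false, err
	default:
		return v, true, nil
	}
}

func main() {
	s := store{"svc-a": "spec-v2"}
	fmt.Println(latest(s, "svc-a"))
	fmt.Println(latest(s, "svc-b"))
}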
+
// UpdateLoadBalancer updates hosts under the specified load balancer.
func (az *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error {
+ // Serialize service reconcile process
+ az.serviceReconcileLock.Lock()
+ defer az.serviceReconcileLock.Unlock()
+
var err error
serviceName := getServiceName(service)
- mc := metrics.NewMetricContext("services", "update_loadbalancer", az.ResourceGroup, az.SubscriptionID, serviceName)
+ mc := metrics.NewMetricContext("services", "update_loadbalancer", az.ResourceGroup, az.getNetworkResourceSubscriptionID(), serviceName)
klog.V(5).InfoS("UpdateLoadBalancer Start", "service", serviceName, "cluster", clusterName, "service_spec", service)
isOperationSucceeded := false
defer func() {
@@ -172,6 +244,17 @@ func (az *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, ser
klog.V(5).InfoS("UpdateLoadBalancer Finish", "service", serviceName, "cluster", clusterName, "service_spec", service, "error", err)
}()
+ // In case UpdateLoadBalancer was handed a stale service spec, retrieve the latest one from the lister
+ service, serviceExists, err := az.getLatestService(service)
+ if err != nil {
+ return fmt.Errorf("UpdateLoadBalancer: failed to get latest service %s: %w", service.Name, err)
+ }
+ if !serviceExists {
+ isOperationSucceeded = true
+ klog.V(2).Infof("UpdateLoadBalancer: skipping service %s because service is going to be deleted", service.Name)
+ return nil
+ }
+
shouldUpdateLB, err := az.shouldUpdateLoadBalancer(clusterName, service, nodes)
if err != nil {
return err
@@ -199,10 +282,13 @@ func (az *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, ser
// have multiple underlying components, meaning a Get could say that the LB
// doesn't exist even if some part of it is still laying around.
func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error {
+ // Serialize service reconcile process
+ az.serviceReconcileLock.Lock()
+ defer az.serviceReconcileLock.Unlock()
+
var err error
- isInternal := requiresInternalLoadBalancer(service)
serviceName := getServiceName(service)
- mc := metrics.NewMetricContext("services", "ensure_loadbalancer_deleted", az.ResourceGroup, az.SubscriptionID, serviceName)
+ mc := metrics.NewMetricContext("services", "ensure_loadbalancer_deleted", az.ResourceGroup, az.getNetworkResourceSubscriptionID(), serviceName)
klog.V(5).InfoS("EnsureLoadBalancerDeleted Start", "service", serviceName, "cluster", clusterName, "service_spec", service)
isOperationSucceeded := false
defer func() {
@@ -210,13 +296,13 @@ func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName stri
klog.V(5).InfoS("EnsureLoadBalancerDeleted Finish", "service", serviceName, "cluster", clusterName, "service_spec", service, "error", err)
}()
- serviceIPToCleanup, err := az.findServiceIPAddress(ctx, clusterName, service, isInternal)
+ serviceIPToCleanup, err := az.findServiceIPAddress(ctx, clusterName, service)
if err != nil && !retry.HasStatusForbiddenOrIgnoredError(err) {
return err
}
klog.V(2).Infof("EnsureLoadBalancerDeleted: reconciling security group for service %q with IP %q, wantLb = false", serviceName, serviceIPToCleanup)
- _, err = az.reconcileSecurityGroup(clusterName, service, &serviceIPToCleanup, false /* wantLb */)
+ _, err = az.reconcileSecurityGroup(clusterName, service, &serviceIPToCleanup, nil, false /* wantLb */)
if err != nil {
return err
}
@@ -226,6 +312,12 @@ func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName stri
return err
}
+ // also reconcile the flipped service, in case the internal annotation was toggled
+ flippedService := flipServiceInternalAnnotation(service)
+ if _, err := az.reconcileLoadBalancer(clusterName, flippedService, nil, false /* wantLb */); err != nil {
+ return err
+ }
+
_, err = az.reconcilePublicIP(clusterName, service, "", false /* wantLb */)
if err != nil {
return err
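The flipped-service pass exists because a user can toggle the internal-LB annotation at any time: on deletion, the cloud provider must also reconcile (and clean up) the LB of the opposite type that the service may have owned earlier. A hedged sketch of what "flipping" means, assuming the well-known azure-load-balancer-internal annotation key (the real helper lives elsewhere in this file):

    import (
        "strings"

        v1 "k8s.io/api/core/v1"
    )

    func flipInternalAnnotationSketch(svc *v1.Service) *v1.Service {
        const internalKey = "service.beta.kubernetes.io/azure-load-balancer-internal" // assumed key
        copied := svc.DeepCopy()
        if copied.Annotations == nil {
            copied.Annotations = map[string]string{}
        }
        if strings.EqualFold(copied.Annotations[internalKey], "true") {
            delete(copied.Annotations, internalKey) // internal -> public
        } else {
            copied.Annotations[internalKey] = "true" // public -> internal
        }
        return copied
    }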
@@ -432,12 +524,10 @@ func (az *Cloud) cleanOrphanedLoadBalancer(lb *network.LoadBalancer, existingLBs
// safeDeleteLoadBalancer deletes the load balancer after decoupling it from the vmSet
func (az *Cloud) safeDeleteLoadBalancer(lb network.LoadBalancer, clusterName, vmSetName string, service *v1.Service) *retry.Error {
- if isLBBackendPoolTypeIPConfig(service, &lb, clusterName) {
- lbBackendPoolID := az.getBackendPoolID(to.String(lb.Name), az.getLoadBalancerResourceGroup(), getBackendPoolName(clusterName, service))
- err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools, true)
- if err != nil {
- return retry.NewError(false, fmt.Errorf("safeDeleteLoadBalancer: failed to EnsureBackendPoolDeleted: %w", err))
- }
+ lbBackendPoolID := az.getBackendPoolID(to.String(lb.Name), az.getLoadBalancerResourceGroup(), getBackendPoolName(clusterName, service))
+ _, err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools, true)
+ if err != nil {
+ return retry.NewError(false, fmt.Errorf("safeDeleteLoadBalancer: failed to EnsureBackendPoolDeleted: %w", err))
}
klog.V(2).Infof("safeDeleteLoadBalancer: deleting LB %s", to.String(lb.Name))
@@ -455,7 +545,7 @@ func (az *Cloud) safeDeleteLoadBalancer(lb network.LoadBalancer, clusterName, vm
// 1. Using multiple slbs and the vmSet is supposed to share the primary slb.
// 2. When migrating from multiple slbs to single slb mode.
// It also ensures those vmSets are joined to the backend pools of the primary SLBs.
-// It runs only once everytime the cloud controller manager restarts.
+// It runs only once every time the cloud controller manager restarts.
func (az *Cloud) reconcileSharedLoadBalancer(service *v1.Service, clusterName string, nodes []*v1.Node) ([]network.LoadBalancer, error) {
var (
existingLBs []network.LoadBalancer
@@ -556,7 +646,7 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
}
// reuse the pip list to reduce API calls
- var pips *[]network.PublicIPAddress
+ var pips []network.PublicIPAddress
// check if the service already has a load balancer
for i := range existingLBs {
@@ -594,7 +684,7 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
continue
}
var fipConfig *network.FrontendIPConfiguration
- status, fipConfig, err = az.getServiceLoadBalancerStatus(service, &existingLB, pips)
+ status, fipConfig, err = az.getServiceLoadBalancerStatus(service, &existingLB, &pips)
if err != nil {
return nil, nil, false, err
}
@@ -728,6 +818,7 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi
return selectedLB, existsLb, nil
}
+// pips: a non-nil pointer to a slice of existing PIPs; if the slice it points to is nil, ListPIP is called when needed and the slice is filled in place
func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.LoadBalancer, pips *[]network.PublicIPAddress) (status *v1.LoadBalancerStatus, fipConfig *network.FrontendIPConfiguration, err error) {
if lb == nil {
klog.V(10).Info("getServiceLoadBalancerStatus: lb is nil")
@@ -794,9 +885,8 @@ func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.L
return nil, nil, nil
}
+// pips: a non-nil pointer to a slice of existing PIPs; if the slice it points to is nil, ListPIP is called when needed and the slice is filled in place
func (az *Cloud) determinePublicIPName(clusterName string, service *v1.Service, pips *[]network.PublicIPAddress) (string, bool, error) {
- var shouldPIPExisted bool
-
if name, found := service.Annotations[consts.ServiceAnnotationPIPName]; found && name != "" {
return name, true, nil
}
@@ -805,35 +895,39 @@ func (az *Cloud) determinePublicIPName(clusterName string, service *v1.Service,
}
pipResourceGroup := az.getPublicIPAddressResourceGroup(service)
- loadBalancerIP := service.Spec.LoadBalancerIP
+ loadBalancerIP := getServiceLoadBalancerIP(service)
// Assume that the service without loadBalancerIP set is a primary service.
// If a secondary service doesn't set the loadBalancerIP, it is not allowed to share the IP.
if len(loadBalancerIP) == 0 {
- return az.getPublicIPName(clusterName, service), shouldPIPExisted, nil
+ return az.getPublicIPName(clusterName, service), false, nil
}
// For services with loadBalancerIP set, an existing public IP (primary or
// secondary) is required; otherwise a public-IP-not-found error is reported.
pip, err := az.findMatchedPIPByLoadBalancerIP(service, loadBalancerIP, pipResourceGroup, pips)
if err != nil {
- return "", shouldPIPExisted, err
+ return "", false, err
}
if pip != nil && pip.Name != nil {
- return *pip.Name, shouldPIPExisted, nil
+ return *pip.Name, false, nil
}
- return "", shouldPIPExisted, fmt.Errorf("user supplied IP Address %s was not found in resource group %s", loadBalancerIP, pipResourceGroup)
+ return "", false, fmt.Errorf("user supplied IP Address %s was not found in resource group %s", loadBalancerIP, pipResourceGroup)
}
+// pips: a non-nil pointer to a slice of existing PIPs; if the slice it points to is nil, ListPIP is called when needed and the slice is filled in place
func (az *Cloud) findMatchedPIPByLoadBalancerIP(service *v1.Service, loadBalancerIP, pipResourceGroup string, pips *[]network.PublicIPAddress) (*network.PublicIPAddress, error) {
if pips == nil {
+ // this should not happen
+ return nil, fmt.Errorf("findMatchedPIPByLoadBalancerIP: nil pip list passed")
+ } else if *pips == nil {
pipList, err := az.ListPIP(service, pipResourceGroup)
if err != nil {
return nil, err
}
- pips = &pipList
+ *pips = pipList
}
for _, pip := range *pips {
if pip.PublicIPAddressPropertiesFormat.IPAddress != nil &&
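The pips parameter threaded through these helpers is a lazy, call-shared cache: the caller declares an empty slice once, every helper receives a pointer to it, and the first helper that actually needs the list performs the ListPIP call and fills the slice through the pointer, so later helpers in the same reconcile reuse the result. A generic sketch of the pattern:

    package sketch

    import "fmt"

    // ensureLoaded fills *cache via load() exactly once per reconcile pass.
    func ensureLoaded(cache *[]string, load func() ([]string, error)) error {
        if cache == nil {
            return fmt.Errorf("nil cache pointer passed") // mirrors the guard above
        }
        if *cache == nil { // not fetched yet
            items, err := load()
            if err != nil {
                return err
            }
            *cache = items // fill through the pointer so every caller sees it
        }
        return nil
    }

One subtlety this shares with the real code: an empty-but-non-nil result is cached, while a nil result would trigger a refetch, so the loader should return a non-nil slice on success.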
@@ -863,14 +957,15 @@ func flipServiceInternalAnnotation(service *v1.Service) *v1.Service {
func updateServiceLoadBalancerIP(service *v1.Service, serviceIP string) *v1.Service {
copyService := service.DeepCopy()
if len(serviceIP) > 0 && copyService != nil {
- copyService.Spec.LoadBalancerIP = serviceIP
+ setServiceLoadBalancerIP(copyService, serviceIP)
}
return copyService
}
-func (az *Cloud) findServiceIPAddress(ctx context.Context, clusterName string, service *v1.Service, isInternalLb bool) (string, error) {
- if len(service.Spec.LoadBalancerIP) > 0 {
- return service.Spec.LoadBalancerIP, nil
+func (az *Cloud) findServiceIPAddress(ctx context.Context, clusterName string, service *v1.Service) (string, error) {
+ lbIP := getServiceLoadBalancerIP(service)
+ if len(lbIP) > 0 {
+ return lbIP, nil
}
if len(service.Status.LoadBalancer.Ingress) > 0 && len(service.Status.LoadBalancer.Ingress[0].IP) > 0 {
@@ -1000,6 +1095,9 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai
}
klog.V(2).Infof("ensurePublicIPExists for service(%s): pip(%s) - creating", serviceName, *pip.Name)
}
+ if az.ensurePIPTagged(service, &pip) {
+ changed = true
+ }
if foundDNSLabelAnnotation {
updatedDNSSettings, err := reconcileDNSSettings(&pip, domainNameLabel, serviceName, pipName)
@@ -1045,10 +1143,9 @@ func (az *Cloud) reconcileIPSettings(pip *network.PublicIPAddress, service *v1.S
serviceName := getServiceName(service)
ipv6 := utilnet.IsIPv6String(service.Spec.ClusterIP)
if ipv6 {
- klog.V(2).Infof("service(%s): pip(%s) - creating as ipv6 for clusterIP:%v", serviceName, *pip.Name, service.Spec.ClusterIP)
-
if !strings.EqualFold(string(pip.PublicIPAddressVersion), string(network.IPVersionIPv6)) {
pip.PublicIPAddressVersion = network.IPVersionIPv6
+ klog.V(2).Infof("service(%s): pip(%s) - creating as ipv6 for clusterIP:%v", serviceName, *pip.Name, service.Spec.ClusterIP)
changed = true
}
@@ -1063,10 +1160,9 @@ func (az *Cloud) reconcileIPSettings(pip *network.PublicIPAddress, service *v1.S
changed = true
}
} else {
- klog.V(2).Infof("service(%s): pip(%s) - creating as ipv4 for clusterIP:%v", serviceName, *pip.Name, service.Spec.ClusterIP)
-
- if !strings.EqualFold(string(pip.PublicIPAddressVersion), string(network.IPVersionIPv6)) {
+ if !strings.EqualFold(string(pip.PublicIPAddressVersion), string(network.IPVersionIPv4)) {
pip.PublicIPAddressVersion = network.IPVersionIPv4
+ klog.V(2).Infof("service(%s): pip(%s) - creating as ipv4 for clusterIP:%v", serviceName, *pip.Name, service.Spec.ClusterIP)
changed = true
}
}
@@ -1097,7 +1193,8 @@ func reconcileDNSSettings(pip *network.PublicIPAddress, domainNameLabel, service
} else {
existingDNSLabel := pip.PublicIPAddressPropertiesFormat.DNSSettings.DomainNameLabel
if !strings.EqualFold(to.String(existingDNSLabel), domainNameLabel) {
- return false, fmt.Errorf("ensurePublicIPExists for service(%s): pip(%s) - there is an existing DNS label %s on the public IP", serviceName, pipName, *existingDNSLabel)
+ pip.PublicIPAddressPropertiesFormat.DNSSettings.DomainNameLabel = &domainNameLabel
+ changed = true
}
}
@@ -1124,6 +1221,11 @@ func getServiceFromPIPDNSTags(tags map[string]*string) string {
return ""
}
+func deleteServicePIPDNSTags(tags *map[string]*string) {
+ delete(*tags, consts.ServiceUsingDNSKey)
+ delete(*tags, consts.LegacyServiceUsingDNSKey)
+}
+
func getServiceFromPIPServiceTags(tags map[string]*string) string {
v, ok := tags[consts.ServiceTagKey]
if ok && v != nil {
@@ -1246,6 +1348,7 @@ func getDomainNameLabel(pip *network.PublicIPAddress) string {
return to.String(pip.PublicIPAddressPropertiesFormat.DNSSettings.DomainNameLabel)
}
+// pips: a non-nil pointer to a slice of existing PIPs; if the slice it points to is nil, ListPIP is called when needed and the slice is filled in place
func (az *Cloud) isFrontendIPChanged(clusterName string, config network.FrontendIPConfiguration, service *v1.Service, lbFrontendIPConfigName string, pips *[]network.PublicIPAddress) (bool, error) {
isServiceOwnsFrontendIP, isPrimaryService, err := az.serviceOwnsFrontendIP(config, service, pips)
if err != nil {
@@ -1257,7 +1360,7 @@ func (az *Cloud) isFrontendIPChanged(clusterName string, config network.Frontend
if !strings.EqualFold(to.String(config.Name), lbFrontendIPConfigName) {
return false, nil
}
- loadBalancerIP := service.Spec.LoadBalancerIP
+ loadBalancerIP := getServiceLoadBalancerIP(service)
isInternal := requiresInternalLoadBalancer(service)
if isInternal {
// Judge subnet
@@ -1274,10 +1377,7 @@ func (az *Cloud) isFrontendIPChanged(clusterName string, config network.Frontend
return true, nil
}
}
- if loadBalancerIP == "" {
- return config.PrivateIPAllocationMethod == network.IPAllocationMethodStatic, nil
- }
- return config.PrivateIPAllocationMethod != network.IPAllocationMethodStatic || !strings.EqualFold(loadBalancerIP, to.String(config.PrivateIPAddress)), nil
+ return loadBalancerIP != "" && !strings.EqualFold(loadBalancerIP, to.String(config.PrivateIPAddress)), nil
}
pipName, _, err := az.determinePublicIPName(clusterName, service, pips)
if err != nil {
@@ -1409,22 +1509,22 @@ func (az *Cloud) findFrontendIPConfigOfService(
fipConfigs *[]network.FrontendIPConfiguration,
service *v1.Service,
pips *[]network.PublicIPAddress,
-) (*network.FrontendIPConfiguration, bool, error) {
+) (*network.FrontendIPConfiguration, error) {
for _, config := range *fipConfigs {
- owns, isPrimaryService, err := az.serviceOwnsFrontendIP(config, service, pips)
+ owns, _, err := az.serviceOwnsFrontendIP(config, service, pips)
if err != nil {
- return nil, false, err
+ return nil, err
}
if owns {
- return &config, isPrimaryService, nil
+ return &config, nil
}
}
- return nil, false, nil
+ return nil, nil
}
// reconcileLoadBalancer ensures load balancer exists and the frontend ip config is setup.
-// This also reconciles the Service's Ports with the LoadBalancer config.
+// This also reconciles the Service's Ports with the LoadBalancer config.
// This entails adding rules/probes for expected Ports and removing stale rules/ports.
// nodes only used if wantLb is true
func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node, wantLb bool) (*network.LoadBalancer, error) {
@@ -1553,7 +1653,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
if !exist {
return nil, fmt.Errorf("load balancer %q not found", lbName)
}
- lb = &newLB
+ lb = newLB
}
}
@@ -1684,12 +1784,12 @@ func (az *Cloud) reconcileFrontendIPConfigs(clusterName string, service *v1.Serv
}
// Save pip list so it can be reused in loop
- var pips *[]network.PublicIPAddress
+ var pips []network.PublicIPAddress
var ownedFIPConfig *network.FrontendIPConfiguration
if !wantLb {
for i := len(newConfigs) - 1; i >= 0; i-- {
config := newConfigs[i]
- isServiceOwnsFrontendIP, _, err := az.serviceOwnsFrontendIP(config, service, pips)
+ isServiceOwnsFrontendIP, _, err := az.serviceOwnsFrontendIP(config, service, &pips)
if err != nil {
return nil, toDeleteConfigs, false, err
}
@@ -1727,13 +1827,13 @@ func (az *Cloud) reconcileFrontendIPConfigs(clusterName string, service *v1.Serv
)
for i := len(newConfigs) - 1; i >= 0; i-- {
config := newConfigs[i]
- isServiceOwnsFrontendIP, _, _ := az.serviceOwnsFrontendIP(config, service, pips)
+ isServiceOwnsFrontendIP, _, _ := az.serviceOwnsFrontendIP(config, service, &pips)
if !isServiceOwnsFrontendIP {
klog.V(4).Infof("reconcileFrontendIPConfigs for service (%s): the frontend IP configuration %s does not belong to the service", serviceName, to.String(config.Name))
continue
}
- klog.V(4).Infof("reconcileFrontendIPConfigs for service (%s): checking owned frontend IP cofiguration %s", serviceName, to.String(config.Name))
- isFipChanged, err = az.isFrontendIPChanged(clusterName, config, service, defaultLBFrontendIPConfigName, pips)
+ klog.V(4).Infof("reconcileFrontendIPConfigs for service (%s): checking owned frontend IP configuration %s", serviceName, to.String(config.Name))
+ isFipChanged, err = az.isFrontendIPChanged(clusterName, config, service, defaultLBFrontendIPConfigName, &pips)
if err != nil {
return nil, toDeleteConfigs, false, err
}
@@ -1747,7 +1847,7 @@ func (az *Cloud) reconcileFrontendIPConfigs(clusterName string, service *v1.Serv
break
}
- ownedFIPConfig, _, err = az.findFrontendIPConfigOfService(&newConfigs, service, pips)
+ ownedFIPConfig, err = az.findFrontendIPConfigOfService(&newConfigs, service, &pips)
if err != nil {
return nil, toDeleteConfigs, false, err
}
@@ -1768,7 +1868,7 @@ func (az *Cloud) reconcileFrontendIPConfigs(clusterName string, service *v1.Serv
}
if !existsSubnet {
- return nil, toDeleteConfigs, false, fmt.Errorf("ensure(%s): lb(%s) - failed to get subnet: %s/%s", serviceName, lbName, az.VnetName, az.SubnetName)
+ return nil, toDeleteConfigs, false, fmt.Errorf("ensure(%s): lb(%s) - failed to get subnet: %s/%s", serviceName, lbName, az.VnetName, *subnetName)
}
configProperties := network.FrontendIPConfigurationPropertiesFormat{
@@ -1779,11 +1879,11 @@ func (az *Cloud) reconcileFrontendIPConfigs(clusterName string, service *v1.Serv
configProperties.PrivateIPAddressVersion = network.IPVersionIPv6
}
- loadBalancerIP := service.Spec.LoadBalancerIP
+ loadBalancerIP := getServiceLoadBalancerIP(service)
if loadBalancerIP != "" {
configProperties.PrivateIPAllocationMethod = network.IPAllocationMethodStatic
configProperties.PrivateIPAddress = &loadBalancerIP
- } else if status != nil && len(status.Ingress) > 0 {
+ } else if status != nil && len(status.Ingress) > 0 && ipInSubnet(status.Ingress[0].IP, &subnet) {
klog.V(4).Infof("reconcileFrontendIPConfigs for service (%s): keep the original private IP %s", serviceName, status.Ingress[0].IP)
configProperties.PrivateIPAllocationMethod = network.IPAllocationMethodStatic
configProperties.PrivateIPAddress = to.StringPtr(status.Ingress[0].IP)
@@ -1795,7 +1895,7 @@ func (az *Cloud) reconcileFrontendIPConfigs(clusterName string, service *v1.Serv
fipConfigurationProperties = &configProperties
} else {
- pipName, shouldPIPExisted, err := az.determinePublicIPName(clusterName, service, pips)
+ pipName, shouldPIPExisted, err := az.determinePublicIPName(clusterName, service, &pips)
if err != nil {
return nil, toDeleteConfigs, false, err
}
@@ -1811,7 +1911,7 @@ func (az *Cloud) reconcileFrontendIPConfigs(clusterName string, service *v1.Serv
newConfig := network.FrontendIPConfiguration{
Name: to.StringPtr(defaultLBFrontendIPConfigName),
- ID: to.StringPtr(fmt.Sprintf(consts.FrontendIPConfigIDTemplate, az.SubscriptionID, az.ResourceGroup, *lb.Name, defaultLBFrontendIPConfigName)),
+ ID: to.StringPtr(fmt.Sprintf(consts.FrontendIPConfigIDTemplate, az.getNetworkResourceSubscriptionID(), az.ResourceGroup, *lb.Name, defaultLBFrontendIPConfigName)),
FrontendIPConfigurationPropertiesFormat: fipConfigurationProperties,
}
@@ -1838,7 +1938,7 @@ func (az *Cloud) getFrontendZones(
fipConfig *network.FrontendIPConfiguration,
previousZone *[]string,
isFipChanged bool,
- serviceName, defaultLBFrontendIPConfigName string) error {
+ serviceName, lbFrontendIPConfigName string) error {
if !isFipChanged { // fetch zone information from API for new frontends
// only add zone information for new internal frontend IP configurations for standard load balancer not deployed to an edge zone.
location := az.Location
@@ -1851,10 +1951,10 @@ func (az *Cloud) getFrontendZones(
}
} else {
if previousZone == nil { // keep the existing zone information for existing frontends
- klog.V(2).Infof("getFrontendZones for service (%s): lb frontendconfig(%s): setting zone to nil", serviceName, defaultLBFrontendIPConfigName)
+ klog.V(2).Infof("getFrontendZones for service (%s): lb frontendconfig(%s): setting zone to nil", serviceName, lbFrontendIPConfigName)
} else {
zoneStr := strings.Join(*previousZone, ",")
- klog.V(2).Infof("getFrontendZones for service (%s): lb frontendconfig(%s): setting zone to %s", serviceName, defaultLBFrontendIPConfigName, zoneStr)
+ klog.V(2).Infof("getFrontendZones for service (%s): lb frontendconfig(%s): setting zone to %s", serviceName, lbFrontendIPConfigName, zoneStr)
}
fipConfig.Zones = previousZone
}
@@ -1965,7 +2065,7 @@ func lbRuleConflictsWithPort(rule network.LoadBalancingRule, frontendIPConfigID
// buildHealthProbeRulesForPort builds the health probe rule for the given port
// for the following SKUs: basic load balancer and standard load balancer
// for the following protocols: TCP, HTTP, and HTTPS (SLB only)
-func (az *Cloud) buildHealthProbeRulesForPort(annotations map[string]string, port v1.ServicePort, lbrule string) (*network.Probe, error) {
+func (az *Cloud) buildHealthProbeRulesForPort(serviceManifest *v1.Service, port v1.ServicePort, lbrule string) (*network.Probe, error) {
if port.Protocol == v1.ProtocolUDP || port.Protocol == v1.ProtocolSCTP {
return nil, nil
}
@@ -1973,19 +2073,45 @@ func (az *Cloud) buildHealthProbeRulesForPort(annotations map[string]string, por
properties := &network.ProbePropertiesFormat{}
var err error
- if port.AppProtocol == nil {
- if port.AppProtocol, err = consts.GetAttributeValueInSvcAnnotation(annotations, consts.ServiceAnnotationLoadBalancerHealthProbeProtocol); err != nil {
+
+ // Select the probe protocol. Resolution order (most specific wins):
+ //   1. the port-specific health probe annotation
+ //   2. the port's AppProtocol field
+ //   3. the global health probe protocol annotation
+ //   4. TCP as the default
+ var protocol *string
+
+ // 1. Look up port-specific override
+ protocol, err = consts.GetHealthProbeConfigOfPortFromK8sSvcAnnotation(serviceManifest.Annotations, port.Port, consts.HealthProbeParamsProtocol)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse annotation %s: %w", consts.BuildHealthProbeAnnotationKeyForPort(port.Port, consts.HealthProbeParamsProtocol), err)
+ }
+
+ // 2. If not specified, look up from AppProtocol
+ // Note - this order is to remain compatible with previous versions
+ if protocol == nil {
+ protocol = port.AppProtocol
+ }
+
+ // 3. If protocol is still nil, check the global annotation
+ if protocol == nil {
+ protocol, err = consts.GetAttributeValueInSvcAnnotation(serviceManifest.Annotations, consts.ServiceAnnotationLoadBalancerHealthProbeProtocol)
+ if err != nil {
return nil, fmt.Errorf("failed to parse annotation %s: %w", consts.ServiceAnnotationLoadBalancerHealthProbeProtocol, err)
}
- if port.AppProtocol == nil {
- port.AppProtocol = to.StringPtr(string(network.ProtocolTCP))
- }
}
- protocol := strings.TrimSpace(*port.AppProtocol)
+
+ // 4. Finally, if protocol is still nil, default to TCP
+ if protocol == nil {
+ protocol = to.StringPtr(string(network.ProtocolTCP))
+ }
+
+ *protocol = strings.TrimSpace(*protocol)
switch {
- case strings.EqualFold(protocol, string(network.ProtocolTCP)):
+ case strings.EqualFold(*protocol, string(network.ProtocolTCP)):
properties.Protocol = network.ProbeProtocolTCP
- case strings.EqualFold(protocol, string(network.ProtocolHTTPS)):
+ case strings.EqualFold(*protocol, string(network.ProtocolHTTPS)):
// The HTTPS probe is only supported on a standard load balancer.
// For backward compatibility, when an unsupported protocol is used, fall back to TCP in basic LB mode instead.
if !az.useStandardLoadBalancer() {
@@ -1993,21 +2119,81 @@ func (az *Cloud) buildHealthProbeRulesForPort(annotations map[string]string, por
} else {
properties.Protocol = network.ProbeProtocolHTTPS
}
- case strings.EqualFold(protocol, string(network.ProtocolHTTP)):
+ case strings.EqualFold(*protocol, string(network.ProtocolHTTP)):
properties.Protocol = network.ProbeProtocolHTTP
default:
// For backward compatibility, when an unsupported protocol is used, fall back to TCP in basic LB mode instead.
properties.Protocol = network.ProbeProtocolTCP
}
+ // Look up or override the health probe port (defaults to the port's NodePort)
+ properties.Port = &port.NodePort
+
+ probePort, err := consts.GetHealthProbeConfigOfPortFromK8sSvcAnnotation(serviceManifest.Annotations, port.Port, consts.HealthProbeParamsPort, func(s *string) error {
+ if s == nil {
+ return nil
+ }
+ //nolint:gosec
+ port, err := strconv.Atoi(*s)
+ if err != nil {
+ // not an integer; try to match a named port
+ for _, item := range serviceManifest.Spec.Ports {
+ if strings.EqualFold(item.Name, *s) {
+ // found the named port
+ return nil
+ }
+ }
+ return fmt.Errorf("port %s not found in service", *s)
+ }
+ if port < 0 || port > 65535 {
+ return fmt.Errorf("port %d is out of range", port)
+ }
+ for _, item := range serviceManifest.Spec.Ports {
+ //nolint:gosec
+ if item.Port == int32(port) {
+ // found the matching port
+ return nil
+ }
+ }
+ return fmt.Errorf("port %s not found in service", *s)
+ })
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse annotation %s: %w", consts.BuildHealthProbeAnnotationKeyForPort(port.Port, consts.HealthProbeParamsPort), err)
+ }
+
+ if probePort != nil {
+ //nolint:gosec
+ port, err := strconv.Atoi(*probePort)
+ if err != nil {
+ // not an integer; resolve it as a named port
+ for _, item := range serviceManifest.Spec.Ports {
+ if strings.EqualFold(item.Name, *probePort) {
+ // found the named port; probe its NodePort
+ properties.Port = to.Int32Ptr(item.NodePort)
+ }
+ }
+ } else {
+ if port >= 0 && port <= 65535 {
+ for _, item := range serviceManifest.Spec.Ports {
+ //nolint:gosec
+ if item.Port == int32(port) {
+ // found the matching port; probe its NodePort
+ properties.Port = to.Int32Ptr(item.NodePort)
+ }
+ }
+ }
+ }
+ }
+
+ // Select request path
if strings.EqualFold(string(properties.Protocol), string(network.ProtocolHTTPS)) || strings.EqualFold(string(properties.Protocol), string(network.ProtocolHTTP)) {
// get the request path, only used with http/https probes
- path, err := consts.GetHealthProbeConfigOfPortFromK8sSvcAnnotation(annotations, port.Port, consts.HealthProbeParamsRequestPath)
+ path, err := consts.GetHealthProbeConfigOfPortFromK8sSvcAnnotation(serviceManifest.Annotations, port.Port, consts.HealthProbeParamsRequestPath)
if err != nil {
return nil, fmt.Errorf("failed to parse annotation %s: %w", consts.BuildHealthProbeAnnotationKeyForPort(port.Port, consts.HealthProbeParamsRequestPath), err)
}
if path == nil {
- if path, err = consts.GetAttributeValueInSvcAnnotation(annotations, consts.ServiceAnnotationLoadBalancerHealthProbeRequestPath); err != nil {
+ if path, err = consts.GetAttributeValueInSvcAnnotation(serviceManifest.Annotations, consts.ServiceAnnotationLoadBalancerHealthProbeRequestPath); err != nil {
return nil, fmt.Errorf("failed to parse annotation %s: %w", consts.ServiceAnnotationLoadBalancerHealthProbeRequestPath, err)
}
}
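Putting the resolution order together, a hedged example of a Service that exercises both the per-port and the global annotations (the per-port key shape is assumed from consts.BuildHealthProbeAnnotationKeyForPort; verify the exact strings against your consts package):

    svc := &v1.Service{
        ObjectMeta: metav1.ObjectMeta{
            Name: "web",
            Annotations: map[string]string{
                // port-specific override for 443: HTTP probe against the
                // NodePort of the service's "healthz" port at /healthz
                "service.beta.kubernetes.io/port_443_health-probe_protocol":     "http",
                "service.beta.kubernetes.io/port_443_health-probe_port":         "healthz",
                "service.beta.kubernetes.io/port_443_health-probe_request-path": "/healthz",
                // global fallback used by ports without a specific override
                "service.beta.kubernetes.io/azure-load-balancer-health-probe-protocol": "tcp",
            },
        },
        Spec: v1.ServiceSpec{
            Type: v1.ServiceTypeLoadBalancer,
            Ports: []v1.ServicePort{
                {Name: "https", Port: 443, NodePort: 31443},
                {Name: "healthz", Port: 10256, NodePort: 30256},
            },
        },
    }

Note that the probe-port annotation must name another port of the same service (by name or port number); the probe then targets that port's NodePort (30256 here), which is exactly what the validator above enforces.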
@@ -2027,12 +2213,12 @@ func (az *Cloud) buildHealthProbeRulesForPort(annotations map[string]string, por
}
return nil
}
- numberOfProbes, err := consts.GetInt32HealthProbeConfigOfPortFromK8sSvcAnnotation(annotations, port.Port, consts.HealthProbeParamsNumOfProbe, numOfProbeValidator)
+ numberOfProbes, err := consts.GetInt32HealthProbeConfigOfPortFromK8sSvcAnnotation(serviceManifest.Annotations, port.Port, consts.HealthProbeParamsNumOfProbe, numOfProbeValidator)
if err != nil {
return nil, fmt.Errorf("failed to parse annotation %s: %w", consts.BuildHealthProbeAnnotationKeyForPort(port.Port, consts.HealthProbeParamsNumOfProbe), err)
}
if numberOfProbes == nil {
- if numberOfProbes, err = consts.Getint32ValueFromK8sSvcAnnotation(annotations, consts.ServiceAnnotationLoadBalancerHealthProbeNumOfProbe, numOfProbeValidator); err != nil {
+ if numberOfProbes, err = consts.Getint32ValueFromK8sSvcAnnotation(serviceManifest.Annotations, consts.ServiceAnnotationLoadBalancerHealthProbeNumOfProbe, numOfProbeValidator); err != nil {
return nil, fmt.Errorf("failed to parse annotation %s: %w", consts.ServiceAnnotationLoadBalancerHealthProbeNumOfProbe, err)
}
}
@@ -2053,12 +2239,12 @@ func (az *Cloud) buildHealthProbeRulesForPort(annotations map[string]string, por
}
return nil
}
- probeInterval, err := consts.GetInt32HealthProbeConfigOfPortFromK8sSvcAnnotation(annotations, port.Port, consts.HealthProbeParamsProbeInterval, probeIntervalValidator)
+ probeInterval, err := consts.GetInt32HealthProbeConfigOfPortFromK8sSvcAnnotation(serviceManifest.Annotations, port.Port, consts.HealthProbeParamsProbeInterval, probeIntervalValidator)
if err != nil {
return nil, fmt.Errorf("failed to parse annotation %s:%w", consts.BuildHealthProbeAnnotationKeyForPort(port.Port, consts.HealthProbeParamsProbeInterval), err)
}
if probeInterval == nil {
- if probeInterval, err = consts.Getint32ValueFromK8sSvcAnnotation(annotations, consts.ServiceAnnotationLoadBalancerHealthProbeInterval, probeIntervalValidator); err != nil {
+ if probeInterval, err = consts.Getint32ValueFromK8sSvcAnnotation(serviceManifest.Annotations, consts.ServiceAnnotationLoadBalancerHealthProbeInterval, probeIntervalValidator); err != nil {
return nil, fmt.Errorf("failed to parse annotation %s: %w", consts.ServiceAnnotationLoadBalancerHealthProbeInterval, err)
}
}
@@ -2073,7 +2259,6 @@ func (az *Cloud) buildHealthProbeRulesForPort(annotations map[string]string, por
}
properties.IntervalInSeconds = probeInterval
properties.NumberOfProbes = numberOfProbes
- properties.Port = &port.NodePort
probe := &network.Probe{
Name: &lbrule,
ProbePropertiesFormat: properties,
@@ -2132,17 +2317,17 @@ func (az *Cloud) getExpectedLBRules(
if nodeEndpointHealthprobe == nil {
// use user customized health probe rule if any
for _, port := range service.Spec.Ports {
- portprobe, err := az.buildHealthProbeRulesForPort(service.Annotations, port, lbRuleName)
+ portprobe, err := az.buildHealthProbeRulesForPort(service, port, lbRuleName)
if err != nil {
klog.V(2).ErrorS(err, "error occurred when buildHealthProbeRulesForPort", "service", service.Name, "namespace", service.Namespace,
"rule-name", lbRuleName, "port", port.Port)
// ignore the error because we only need one correct rule
}
if portprobe != nil {
- expectedProbes = append(expectedProbes, *portprobe)
props.Probe = &network.SubResource{
ID: to.StringPtr(az.getLoadBalancerProbeID(lbName, az.getLoadBalancerResourceGroup(), *portprobe.Name)),
}
+ expectedProbes = append(expectedProbes, *portprobe)
break
}
}
@@ -2163,7 +2348,16 @@ func (az *Cloud) getExpectedLBRules(
for _, port := range service.Spec.Ports {
lbRuleName := az.getLoadBalancerRuleName(service, port.Protocol, port.Port)
klog.V(2).Infof("getExpectedLBRules lb name (%s) rule name (%s)", lbName, lbRuleName)
-
+ isNoLBRuleRequired, err := consts.IsLBRuleOnK8sServicePortDisabled(service.Annotations, port.Port)
+ if err != nil {
+ err := fmt.Errorf("failed to parse annotation %s: %w", consts.BuildAnnotationKeyForPort(port.Port, consts.PortAnnotationNoLBRule), err)
+ klog.V(2).ErrorS(err, "error occurred when getExpectedLoadBalancingRulePropertiesForPort", "service", service.Name, "namespace", service.Namespace,
+ "rule-name", lbRuleName, "port", port.Port)
+ }
+ if isNoLBRuleRequired {
+ klog.V(2).Infof("getExpectedLBRules lb name (%s) rule name (%s) no lb rule required", lbName, lbRuleName)
+ continue
+ }
if port.Protocol == v1.ProtocolSCTP && !(az.useStandardLoadBalancer() && consts.IsK8sServiceUsingInternalLoadBalancer(service)) {
return expectedProbes, expectedRules, fmt.Errorf("SCTP is only supported on standard loadbalancer in internal mode")
}
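A short hedged illustration of the two per-port opt-outs introduced here (key suffixes assumed from consts.PortAnnotationNoLBRule and consts.PortAnnotationNoHealthProbeRule; verify them in your consts package):

    annotations := map[string]string{
        // skip the load balancing rule for port 9000 entirely
        "service.beta.kubernetes.io/port_9000_no_lb_rule": "true",
        // keep the LB rule for port 8080 but attach no health probe to it
        "service.beta.kubernetes.io/port_8080_no_probe_rule": "true",
    }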
@@ -2177,36 +2371,47 @@ func (az *Cloud) getExpectedLBRules(
return expectedProbes, expectedRules, fmt.Errorf("error generate lb rule for ha mod loadbalancer. err: %w", err)
}
- if nodeEndpointHealthprobe == nil {
- portprobe, err := az.buildHealthProbeRulesForPort(service.Annotations, port, lbRuleName)
- if err != nil {
- klog.V(2).ErrorS(err, "error occurred when buildHealthProbeRulesForPort", "service", service.Name, "namespace", service.Namespace,
- "rule-name", lbRuleName, "port", port.Port)
- return expectedProbes, expectedRules, err
- }
- if portprobe != nil {
- expectedProbes = append(expectedProbes, *portprobe)
+ isNoHealthProbeRule, err := consts.IsHealthProbeRuleOnK8sServicePortDisabled(service.Annotations, port.Port)
+ if err != nil {
+ err := fmt.Errorf("failed to parse annotation %s: %w", consts.BuildAnnotationKeyForPort(port.Port, consts.PortAnnotationNoHealthProbeRule), err)
+ klog.V(2).ErrorS(err, "error occurred when buildHealthProbeRulesForPort", "service", service.Name, "namespace", service.Namespace,
+ "rule-name", lbRuleName, "port", port.Port)
+ }
+ if !isNoHealthProbeRule {
+ if nodeEndpointHealthprobe == nil {
+ portprobe, err := az.buildHealthProbeRulesForPort(service, port, lbRuleName)
+ if err != nil {
+ klog.V(2).ErrorS(err, "error occurred when buildHealthProbeRulesForPort", "service", service.Name, "namespace", service.Namespace,
+ "rule-name", lbRuleName, "port", port.Port)
+ return expectedProbes, expectedRules, err
+ }
+ if portprobe != nil {
+ props.Probe = &network.SubResource{
+ ID: to.StringPtr(az.getLoadBalancerProbeID(lbName, az.getLoadBalancerResourceGroup(), *portprobe.Name)),
+ }
+ expectedProbes = append(expectedProbes, *portprobe)
+ }
+ } else {
props.Probe = &network.SubResource{
- ID: to.StringPtr(az.getLoadBalancerProbeID(lbName, az.getLoadBalancerResourceGroup(), *portprobe.Name)),
+ ID: to.StringPtr(az.getLoadBalancerProbeID(lbName, az.getLoadBalancerResourceGroup(), *nodeEndpointHealthprobe.Name)),
}
}
- } else {
- props.Probe = &network.SubResource{
- ID: to.StringPtr(az.getLoadBalancerProbeID(lbName, az.getLoadBalancerResourceGroup(), *nodeEndpointHealthprobe.Name)),
- }
+ }
+ if consts.IsK8sServiceDisableLoadBalancerFloatingIP(service) {
+ props.BackendPort = to.Int32Ptr(port.NodePort)
+ props.EnableFloatingIP = to.BoolPtr(false)
}
expectedRules = append(expectedRules, network.LoadBalancingRule{
Name: &lbRuleName,
LoadBalancingRulePropertiesFormat: props,
})
-
}
}
return expectedProbes, expectedRules, nil
}
-//getDefaultLoadBalancingRulePropertiesFormat returns the loadbalancing rule for one port
+// getExpectedLoadBalancingRulePropertiesForPort returns the loadbalancing rule properties for one port
func (az *Cloud) getExpectedLoadBalancingRulePropertiesForPort(
service *v1.Service,
lbFrontendIPConfigID string,
@@ -2262,7 +2467,7 @@ func (az *Cloud) getExpectedLoadBalancingRulePropertiesForPort(
return props, nil
}
-//getExpectedHAModeLoadBalancingRuleProperties build load balancing rule for lb in HA mode
+// getExpectedHAModeLoadBalancingRuleProperties builds the load balancing rule for a lb in HA mode
func (az *Cloud) getExpectedHAModeLoadBalancingRuleProperties(
service *v1.Service,
lbFrontendIPConfigID string,
@@ -2277,7 +2482,7 @@ func (az *Cloud) getExpectedHAModeLoadBalancingRuleProperties(
// This reconciles the Network Security Group similar to how the LB is reconciled.
// This entails adding required, missing SecurityRules and removing stale rules.
-func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, lbIP *string, wantLb bool) (*network.SecurityGroup, error) {
+func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, lbIP *string, lbName *string, wantLb bool) (*network.SecurityGroup, error) {
serviceName := getServiceName(service)
klog.V(5).Infof("reconcileSecurityGroup(%s): START clusterName=%q", serviceName, clusterName)
@@ -2307,9 +2512,30 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
destinationIPAddress = "*"
}
+ disableFloatingIP := consts.IsK8sServiceDisableLoadBalancerFloatingIP(service)
+
+ backendIPAddresses := make([]string, 0)
+ if wantLb && disableFloatingIP {
+ lb, exist, err := az.getAzureLoadBalancer(to.String(lbName), azcache.CacheReadTypeDefault)
+ if err != nil {
+ return nil, err
+ }
+ if !exist {
+ return nil, fmt.Errorf("unable to get lb %s", to.String(lbName))
+ }
+ backendPrivateIPv4s, backendPrivateIPv6s := az.LoadBalancerBackendPool.GetBackendPrivateIPs(clusterName, service, lb)
+ backendIPAddresses = backendPrivateIPv4s
+ if utilnet.IsIPv6String(*lbIP) {
+ backendIPAddresses = backendPrivateIPv6s
+ }
+ }
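Disabling floating IP changes where traffic terminates: instead of the VIP being DNAT'ed straight through on the service port, packets land on the node's NodePort. Both the LB rule elsewhere in this patch (BackendPort/EnableFloatingIP) and the NSG rules built below must follow. A hedged sketch of the branch, with the annotation key assumed to be the one behind consts.ServiceAnnotationDisableLoadBalancerFloatingIP:

    import v1 "k8s.io/api/core/v1"

    // assumed annotation key; see consts.ServiceAnnotationDisableLoadBalancerFloatingIP
    const disableFloatingIPKey = "service.beta.kubernetes.io/azure-disable-load-balancer-floating-ip"

    // nsgDestination mirrors the logic above: with floating IP disabled the
    // NSG rule targets the backend node IPs on the NodePort instead of the
    // VIP on the service port.
    func nsgDestination(svc *v1.Service, port v1.ServicePort, vip string, backendNodeIPs []string) (int32, []string) {
        if svc.Annotations[disableFloatingIPKey] == "true" {
            return port.NodePort, backendNodeIPs
        }
        return port.Port, []string{vip}
    }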
+
additionalIPs, err := getServiceAdditionalPublicIPs(service)
if err != nil {
- return nil, fmt.Errorf("unable to get additional public IPs, error=%v", err)
+ return nil, fmt.Errorf("unable to get additional public IPs, error=%w", err)
}
destinationIPAddresses := []string{destinationIPAddress}
@@ -2338,7 +2564,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
sourceAddressPrefixes = append(sourceAddressPrefixes, serviceTags...)
}
- expectedSecurityRules, err := az.getExpectedSecurityRules(wantLb, ports, sourceAddressPrefixes, service, destinationIPAddresses, sourceRanges)
+ expectedSecurityRules, err := az.getExpectedSecurityRules(wantLb, ports, sourceAddressPrefixes, service, destinationIPAddresses, sourceRanges, backendIPAddresses, disableFloatingIP)
if err != nil {
return nil, err
}
@@ -2472,13 +2698,15 @@ func (az *Cloud) reconcileSecurityRules(sg network.SecurityGroup, service *v1.Se
}
}
+ updatedRules = removeDuplicatedSecurityRules(updatedRules)
+
for _, r := range updatedRules {
klog.V(10).Infof("Updated security rule while processing %s: %s:%s -> %s:%s", service.Name, logSafe(r.SourceAddressPrefix), logSafe(r.SourcePortRange), logSafeCollection(r.DestinationAddressPrefix, r.DestinationAddressPrefixes), logSafe(r.DestinationPortRange))
}
return dirtySg, updatedRules, nil
}
-func (az *Cloud) getExpectedSecurityRules(wantLb bool, ports []v1.ServicePort, sourceAddressPrefixes []string, service *v1.Service, destinationIPAddresses []string, sourceRanges utilnet.IPNetSet) ([]network.SecurityRule, error) {
+func (az *Cloud) getExpectedSecurityRules(wantLb bool, ports []v1.ServicePort, sourceAddressPrefixes []string, service *v1.Service, destinationIPAddresses []string, sourceRanges utilnet.IPNetSet, backendIPAddresses []string, disableFloatingIP bool) ([]network.SecurityRule, error) {
expectedSecurityRules := []network.SecurityRule{}
if wantLb {
@@ -2489,6 +2717,10 @@ func (az *Cloud) getExpectedSecurityRules(wantLb bool, ports []v1.ServicePort, s
if err != nil {
return nil, err
}
+ dstPort := port.Port
+ if disableFloatingIP {
+ dstPort = port.NodePort
+ }
for j := range sourceAddressPrefixes {
ix := i*len(sourceAddressPrefixes) + j
securityRuleName := az.getSecurityRuleName(service, port, sourceAddressPrefixes[j])
@@ -2497,13 +2729,16 @@ func (az *Cloud) getExpectedSecurityRules(wantLb bool, ports []v1.ServicePort, s
SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{
Protocol: *securityProto,
SourcePortRange: to.StringPtr("*"),
- DestinationPortRange: to.StringPtr(strconv.Itoa(int(port.Port))),
+ DestinationPortRange: to.StringPtr(strconv.Itoa(int(dstPort))),
SourceAddressPrefix: to.StringPtr(sourceAddressPrefixes[j]),
Access: network.SecurityRuleAccessAllow,
Direction: network.SecurityRuleDirectionInbound,
},
}
- if len(destinationIPAddresses) == 1 {
+
+ if len(destinationIPAddresses) == 1 && disableFloatingIP {
+ nsgRule.DestinationAddressPrefixes = to.StringSlicePtr(backendIPAddresses)
+ } else if len(destinationIPAddresses) == 1 && !disableFloatingIP {
// continue to use DestinationAddressPrefix to avoid NSG updates for existing rules.
nsgRule.DestinationAddressPrefix = to.StringPtr(destinationIPAddresses[0])
} else {
@@ -2561,7 +2796,7 @@ func (az *Cloud) shouldUpdateLoadBalancer(clusterName string, service *v1.Servic
}
_, _, existsLb, _ := az.getServiceLoadBalancer(service, clusterName, nodes, false, existingManagedLBs)
- return existsLb && service.ObjectMeta.DeletionTimestamp == nil, nil
+ return existsLb && service.ObjectMeta.DeletionTimestamp == nil && service.Spec.Type == v1.ServiceTypeLoadBalancer, nil
}
func logSafe(s *string) string {
@@ -2810,11 +3045,10 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, lbNa
}
if lbName != "" {
- loadBalancer, _, err := az.getAzureLoadBalancer(lbName, azcache.CacheReadTypeDefault)
+ lb, _, err = az.getAzureLoadBalancer(lbName, azcache.CacheReadTypeDefault)
if err != nil {
return nil, err
}
- lb = &loadBalancer
}
discoveredDesiredPublicIP, pipsToBeDeleted, deletedDesiredPublicIP, pipsToBeUpdated, err := az.getPublicIPUpdates(
@@ -2892,10 +3126,9 @@ func (az *Cloud) getPublicIPUpdates(
owns, isUserAssignedPIP := serviceOwnsPublicIP(service, &pip, clusterName)
if owns {
var dirtyPIP, toBeDeleted bool
- if !wantLb && !isUserAssignedPIP {
+ if !wantLb {
klog.V(2).Infof("reconcilePublicIP for service(%s): unbinding the service from pip %s", serviceName, *pip.Name)
- err = unbindServiceFromPIP(&pip, service, serviceName, clusterName)
- if err != nil {
+ if err = unbindServiceFromPIP(&pip, service, serviceName, clusterName, isUserAssignedPIP); err != nil {
return false, nil, false, nil, err
}
dirtyPIP = true
@@ -3052,12 +3285,12 @@ func equalLoadBalancingRulePropertiesFormat(s *network.LoadBalancingRuleProperti
properties = properties && reflect.DeepEqual(to.Bool(s.EnableTCPReset), to.Bool(t.EnableTCPReset))
}
- properties = properties && reflect.DeepEqual(s.FrontendIPConfiguration, t.FrontendIPConfiguration) &&
- reflect.DeepEqual(s.BackendAddressPool, t.BackendAddressPool) &&
+ properties = properties && equalSubResource(s.FrontendIPConfiguration, t.FrontendIPConfiguration) &&
+ equalSubResource(s.BackendAddressPool, t.BackendAddressPool) &&
reflect.DeepEqual(s.LoadDistribution, t.LoadDistribution) &&
reflect.DeepEqual(s.FrontendPort, t.FrontendPort) &&
reflect.DeepEqual(s.BackendPort, t.BackendPort) &&
- reflect.DeepEqual(s.Probe, t.Probe) &&
+ equalSubResource(s.Probe, t.Probe) &&
reflect.DeepEqual(s.EnableFloatingIP, t.EnableFloatingIP) &&
reflect.DeepEqual(to.Bool(s.DisableOutboundSnat), to.Bool(t.DisableOutboundSnat))
@@ -3067,6 +3300,16 @@ func equalLoadBalancingRulePropertiesFormat(s *network.LoadBalancingRuleProperti
return properties
}
+func equalSubResource(s *network.SubResource, t *network.SubResource) bool {
+ if s == nil && t == nil {
+ return true
+ }
+ if s == nil || t == nil {
+ return false
+ }
+ return strings.EqualFold(to.String(s.ID), to.String(t.ID))
+}
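The switch from reflect.DeepEqual to equalSubResource matters because ARM does not guarantee stable casing in resource IDs (e.g. resourcegroups vs resourceGroups), so a byte-wise comparison reports spurious diffs and triggers needless LB updates. An illustration (the IDs are made up):

    a := &network.SubResource{ID: to.StringPtr("/subscriptions/s/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb/probes/p")}
    b := &network.SubResource{ID: to.StringPtr("/subscriptions/s/resourcegroups/rg/providers/Microsoft.Network/loadBalancers/lb/probes/p")}
    fmt.Println(reflect.DeepEqual(a, b)) // false: pointer targets differ by case
    fmt.Println(equalSubResource(a, b))  // true: IDs compared case-insensitively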
+
// This compares rule's Name, Protocol, SourcePortRange, DestinationPortRange, SourceAddressPrefix, Access, and Direction.
// Note that it compares rule's DestinationAddressPrefix only when it's not consolidated rule as such rule does not have DestinationAddressPrefix defined.
// We intentionally do not compare DestinationAddressPrefixes in consolidated case because reconcileSecurityRule has to consider the two rules equal,
@@ -3154,6 +3397,35 @@ func subnet(service *v1.Service) *string {
return nil
}
+func ipInSubnet(ip string, subnet *network.Subnet) bool {
+ if subnet == nil || subnet.SubnetPropertiesFormat == nil {
+ return false
+ }
+ netIP, err := netip.ParseAddr(ip)
+ if err != nil {
+ klog.Errorf("ipInSubnet: failed to parse ip %s: %v", netIP, err)
+ return false
+ }
+ cidrs := make([]string, 0)
+ if subnet.AddressPrefix != nil {
+ cidrs = append(cidrs, *subnet.AddressPrefix)
+ }
+ if subnet.AddressPrefixes != nil {
+ cidrs = append(cidrs, *subnet.AddressPrefixes...)
+ }
+ for _, cidr := range cidrs {
+ prefix, err := netip.ParsePrefix(cidr)
+ if err != nil {
+ klog.Errorf("ipInSubnet: failed to parse ip cidr %s: %v", cidr, err)
+ continue
+ }
+ if prefix.Contains(netIP) {
+ return true
+ }
+ }
+ return false
+}
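ipInSubnet leans on the standard library's net/netip; a tiny runnable illustration of the same parse-and-contains flow, including the dual-stack case:

    package main

    import (
        "fmt"
        "net/netip"
    )

    func main() {
        addr := netip.MustParseAddr("10.240.0.4")
        for _, cidr := range []string{"10.240.0.0/16", "fd00::/64"} {
            prefix := netip.MustParsePrefix(cidr)
            // Contains returns false (not an error) when the address
            // families differ, so mixing v4 and v6 prefixes is safe.
            fmt.Println(cidr, prefix.Contains(addr))
        }
    }

This prints "10.240.0.0/16 true" and "fd00::/64 false", matching how the helper quietly skips prefixes of the other address family.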
+
// getServiceLoadBalancerMode parses the mode value.
// if the value is __auto__ it returns isAuto = TRUE.
// if anything else it returns the unique VM set names after trimming spaces.
@@ -3202,7 +3474,7 @@ func getServiceTags(service *v1.Service) []string {
// The pip is user-created if and only if there is no service tags.
// The service owns the pip if:
// 1. The serviceName is included in the service tags of a system-created pip.
-// 2. The service.Spec.LoadBalancerIP matches the IP address of a user-created pip.
+// 2. The service LoadBalancerIP matches the IP address of a user-created pip.
func serviceOwnsPublicIP(service *v1.Service, pip *network.PublicIPAddress, clusterName string) (bool, bool) {
if service == nil || pip == nil {
klog.Warningf("serviceOwnsPublicIP: nil service or public IP")
@@ -3222,7 +3494,7 @@ func serviceOwnsPublicIP(service *v1.Service, pip *network.PublicIPAddress, clus
// if there is no service tag on the pip, it is user-created pip
if serviceTag == "" {
- return strings.EqualFold(to.String(pip.IPAddress), service.Spec.LoadBalancerIP), true
+ return strings.EqualFold(to.String(pip.IPAddress), getServiceLoadBalancerIP(service)), true
}
// if there is service tag on the pip, it is system-created pip
@@ -3238,9 +3510,9 @@ func serviceOwnsPublicIP(service *v1.Service, pip *network.PublicIPAddress, clus
return true, false
}
} else {
- // if the service is not included in te tags of the system-created pip, check the ip address
+ // if the service is not included in the tags of the system-created pip, check the ip address
// this could happen for secondary services
- return strings.EqualFold(to.String(pip.IPAddress), service.Spec.LoadBalancerIP), false
+ return strings.EqualFold(to.String(pip.IPAddress), getServiceLoadBalancerIP(service)), false
}
}
@@ -3328,11 +3600,19 @@ func bindServicesToPIP(pip *network.PublicIPAddress, incomingServiceNames []stri
return addedNew, nil
}
-func unbindServiceFromPIP(pip *network.PublicIPAddress, service *v1.Service, serviceName, clusterName string) error {
+func unbindServiceFromPIP(pip *network.PublicIPAddress, service *v1.Service,
+ serviceName, clusterName string, isUserAssignedPIP bool) error {
if pip == nil || pip.Tags == nil {
return fmt.Errorf("nil public IP or tags")
}
+ if existingServiceName := getServiceFromPIPDNSTags(pip.Tags); existingServiceName != "" && strings.EqualFold(existingServiceName, serviceName) {
+ deleteServicePIPDNSTags(&pip.Tags)
+ }
+ // skip removing the service tags for user-assigned pips
+ if isUserAssignedPIP {
+ return nil
+ }
+
serviceTagValue := to.StringPtr(getServiceFromPIPServiceTags(pip.Tags))
existingServiceNames := parsePIPServiceTag(serviceTagValue)
@@ -3341,6 +3621,7 @@ func unbindServiceFromPIP(pip *network.PublicIPAddress, service *v1.Service, ser
if strings.EqualFold(existingServiceNames[i], serviceName) {
existingServiceNames = append(existingServiceNames[:i], existingServiceNames[i+1:]...)
found = true
+ break
}
}
if !found {
@@ -3348,15 +3629,7 @@ func unbindServiceFromPIP(pip *network.PublicIPAddress, service *v1.Service, ser
}
_, err := bindServicesToPIP(pip, existingServiceNames, true)
- if err != nil {
- return err
- }
-
- if existingServiceName := getServiceFromPIPDNSTags(pip.Tags); existingServiceName != "" && strings.EqualFold(existingServiceName, serviceName) {
- pip.Tags[consts.ServiceUsingDNSKey] = to.StringPtr("")
- }
-
- return nil
+ return err
}
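The rewritten unbind flow is: drop the DNS-ownership tags if this service owns the DNS label, stop there for user-assigned PIPs (their tags belong to the user), and otherwise remove the service from the shared service-list tag. A hedged sketch of the tag bookkeeping on a system-managed PIP, assuming the tag keys behind consts.ServiceTagKey and consts.ServiceUsingDNSKey are "k8s-azure-service" and "k8s-azure-dns-label-service":

    tags := map[string]*string{
        "k8s-azure-service":           to.StringPtr("ns1/svc-a,ns2/svc-b"), // PIP shared by two services
        "k8s-azure-dns-label-service": to.StringPtr("ns1/svc-a"),           // DNS label owner
    }
    // Unbinding ns1/svc-a deletes the DNS tag and shrinks the list:
    //   k8s-azure-service           -> "ns2/svc-b"
    //   k8s-azure-dns-label-service -> (deleted)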
// ensureLoadBalancerTagged ensures every load balancer in the resource group is tagged as configured
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go
index f00db256972c..a94f54eb8b90 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go
@@ -32,6 +32,7 @@ import (
"k8s.io/klog/v2"
utilnet "k8s.io/utils/net"
+ "sigs.k8s.io/cloud-provider-azure/pkg/cache"
"sigs.k8s.io/cloud-provider-azure/pkg/consts"
)
@@ -51,6 +52,9 @@ type BackendPool interface {
// ReconcileBackendPools creates the inbound backend pool if it is not existed, and removes nodes that are supposed to be
// excluded from the load balancers.
ReconcileBackendPools(clusterName string, service *v1.Service, lb *network.LoadBalancer) (bool, bool, error)
+
+ // GetBackendPrivateIPs returns the private IPs of LoadBalancer's backend pool
+ GetBackendPrivateIPs(clusterName string, service *v1.Service, lb *network.LoadBalancer) ([]string, []string)
}
type backendPoolTypeNodeIPConfig struct {
@@ -115,15 +119,16 @@ func (bc *backendPoolTypeNodeIPConfig) CleanupVMSetFromBackendPoolByCondition(sl
},
}
// decouple the backendPool from the node
- err := bc.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted, true)
+ shouldRefreshLB, err := bc.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted, true)
if err != nil {
return nil, err
}
- slb.BackendAddressPools = &newBackendPools
- // Proactively disable the etag to prevent etag mismatch error when putting lb later.
- // This could happen because when we remove the hosts from the lb, the nrp
- // would put the lb to remove the backend references as well.
- slb.Etag = nil
+ if shouldRefreshLB {
+ slb, _, err = bc.getAzureLoadBalancer(to.String(slb.Name), cache.CacheReadTypeForceRefresh)
+ if err != nil {
+ return nil, fmt.Errorf("bc.CleanupVMSetFromBackendPoolByCondition: failed to get load balancer %s, err: %w", to.String(slb.Name), err)
+ }
+ }
}
return slb, nil
@@ -138,6 +143,7 @@ func (bc *backendPoolTypeNodeIPConfig) ReconcileBackendPools(clusterName string,
foundBackendPool := false
changed := false
+ shouldRefreshLB := false
lbName := *lb.Name
serviceName := getServiceName(service)
@@ -164,14 +170,13 @@ func (bc *backendPoolTypeNodeIPConfig) ReconcileBackendPools(clusterName string,
bp.LoadBalancerBackendAddresses != nil &&
len(*bp.LoadBalancerBackendAddresses) > 0 {
if removeNodeIPAddressesFromBackendPool(bp, []string{}, true) {
- bp.Etag = nil
if err := bc.CreateOrUpdateLBBackendPool(lbName, bp); err != nil {
klog.Errorf("bc.ReconcileBackendPools for service (%s): failed to cleanup IP based backend pool %s: %s", serviceName, lbBackendPoolName, err.Error())
return false, false, fmt.Errorf("bc.ReconcileBackendPools for service (%s): failed to cleanup IP based backend pool %s: %w", serviceName, lbBackendPoolName, err)
}
newBackendPools[i] = bp
lb.BackendAddressPools = &newBackendPools
- lb.Etag = nil
+ shouldRefreshLB = true
}
}
@@ -180,8 +185,13 @@ func (bc *backendPoolTypeNodeIPConfig) ReconcileBackendPools(clusterName string,
for _, ipConf := range *bp.BackendIPConfigurations {
ipConfID := to.String(ipConf.ID)
nodeName, _, err := bc.VMSet.GetNodeNameByIPConfigurationID(ipConfID)
- if err != nil && !errors.Is(err, cloudprovider.InstanceNotFound) {
- return false, false, err
+ if err != nil {
+ if errors.Is(err, cloudprovider.InstanceNotFound) {
+ klog.V(2).Infof("bc.ReconcileBackendPools for service (%s): vm not found for ipConfID %s", serviceName, ipConfID)
+ backendIPConfigurationsToBeDeleted = append(backendIPConfigurationsToBeDeleted, ipConf)
+ } else {
+ return false, false, err
+ }
}
// If a node is not supposed to be included in the LB, it
@@ -210,10 +220,13 @@ func (bc *backendPoolTypeNodeIPConfig) ReconcileBackendPools(clusterName string,
},
}
// decouple the backendPool from the node
- err = bc.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted, false)
+ updated, err := bc.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted, false)
if err != nil {
return false, false, err
}
+ if updated {
+ shouldRefreshLB = true
+ }
}
break
} else {
@@ -221,6 +234,13 @@ func (bc *backendPoolTypeNodeIPConfig) ReconcileBackendPools(clusterName string,
}
}
+ if shouldRefreshLB {
+ lb, _, err = bc.getAzureLoadBalancer(lbName, cache.CacheReadTypeForceRefresh)
+ if err != nil {
+ return false, false, fmt.Errorf("bc.ReconcileBackendPools for service (%s): failed to get loadbalancer %s: %w", serviceName, lbName, err)
+ }
+ }
+
if !foundBackendPool {
isBackendPoolPreConfigured = newBackendPool(lb, isBackendPoolPreConfigured, bc.PreConfiguredBackendPoolLoadBalancerTypes, getServiceName(service), getBackendPoolName(clusterName, service))
changed = true
@@ -229,6 +249,48 @@ func (bc *backendPoolTypeNodeIPConfig) ReconcileBackendPools(clusterName string,
return isBackendPoolPreConfigured, changed, err
}
+func (bc *backendPoolTypeNodeIPConfig) GetBackendPrivateIPs(clusterName string, service *v1.Service, lb *network.LoadBalancer) ([]string, []string) {
+ serviceName := getServiceName(service)
+ lbBackendPoolName := getBackendPoolName(clusterName, service)
+ if lb.LoadBalancerPropertiesFormat == nil || lb.LoadBalancerPropertiesFormat.BackendAddressPools == nil {
+ return nil, nil
+ }
+
+ backendPrivateIPv4s, backendPrivateIPv6s := sets.NewString(), sets.NewString()
+ for _, bp := range *lb.BackendAddressPools {
+ if strings.EqualFold(to.String(bp.Name), lbBackendPoolName) {
+ klog.V(10).Infof("bc.GetBackendPrivateIPs for service (%s): found wanted backendpool %s", serviceName, to.String(bp.Name))
+ if bp.BackendAddressPoolPropertiesFormat != nil && bp.BackendIPConfigurations != nil {
+ for _, backendIPConfig := range *bp.BackendIPConfigurations {
+ ipConfigID := to.String(backendIPConfig.ID)
+ nodeName, _, err := bc.VMSet.GetNodeNameByIPConfigurationID(ipConfigID)
+ if err != nil {
+ klog.Errorf("bc.GetBackendPrivateIPs for service (%s): GetNodeNameByIPConfigurationID failed with error: %v", serviceName, err)
+ continue
+ }
+ privateIPsSet, ok := bc.nodePrivateIPs[nodeName]
+ if !ok {
+ klog.Warningf("bc.GetBackendPrivateIPs for service (%s): failed to get private IPs of node %s", serviceName, nodeName)
+ continue
+ }
+ privateIPs := privateIPsSet.List()
+ for _, ip := range privateIPs {
+ klog.V(2).Infof("bc.GetBackendPrivateIPs for service (%s): lb backendpool - found private IPs %s of node %s", serviceName, ip, nodeName)
+ if utilnet.IsIPv4String(ip) {
+ backendPrivateIPv4s.Insert(ip)
+ } else {
+ backendPrivateIPv6s.Insert(ip)
+ }
+ }
+ }
+ }
+ } else {
+ klog.V(10).Infof("bc.GetBackendPrivateIPs for service (%s): found unmanaged backendpool %s", serviceName, to.String(bp.Name))
+ }
+ }
+ return backendPrivateIPv4s.List(), backendPrivateIPv6s.List()
+}
+
type backendPoolTypeNodeIP struct {
*Cloud
}
@@ -389,6 +451,7 @@ func (bi *backendPoolTypeNodeIP) ReconcileBackendPools(clusterName string, servi
foundBackendPool := false
changed := false
+ shouldRefreshLB := false
lbName := *lb.Name
serviceName := getServiceName(service)
lbBackendPoolName := getBackendPoolName(clusterName, service)
@@ -396,6 +459,7 @@ func (bi *backendPoolTypeNodeIP) ReconcileBackendPools(clusterName string, servi
lbBackendPoolID := bi.getBackendPoolID(to.String(lb.Name), bi.getLoadBalancerResourceGroup(), getBackendPoolName(clusterName, service))
isBackendPoolPreConfigured := bi.isBackendPoolPreConfigured(service)
+ var err error
for i := len(newBackendPools) - 1; i >= 0; i-- {
bp := newBackendPools[i]
if strings.EqualFold(*bp.Name, lbBackendPoolName) {
@@ -410,19 +474,11 @@ func (bi *backendPoolTypeNodeIP) ReconcileBackendPools(clusterName string, servi
// If the LB backend pool type is configured from nodeIPConfiguration
// to nodeIP, we need to decouple the VM NICs from the LB
// before attaching nodeIPs/podIPs to the LB backend pool.
- if bp.BackendAddressPoolPropertiesFormat != nil &&
- bp.BackendIPConfigurations != nil &&
- len(*bp.BackendIPConfigurations) > 0 {
- klog.V(2).Infof("bi.ReconcileBackendPools for service (%s): ensuring the LB is decoupled from the VMSet", serviceName)
- if err := bi.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools, true); err != nil {
- klog.Errorf("bi.ReconcileBackendPools for service (%s): failed to EnsureBackendPoolDeleted: %s", serviceName, err.Error())
- return false, false, err
- }
- newBackendPools[i].BackendAddressPoolPropertiesFormat.LoadBalancerBackendAddresses = &[]network.LoadBalancerBackendAddress{}
- newBackendPools[i].BackendAddressPoolPropertiesFormat.BackendIPConfigurations = &[]network.InterfaceIPConfiguration{}
- newBackendPools[i].Etag = nil
- lb.Etag = nil
- break
+ klog.V(2).Infof("bi.ReconcileBackendPools for service (%s) and vmSet (%s): ensuring the LB is decoupled from the VMSet", serviceName, vmSetName)
+ shouldRefreshLB, err = bi.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools, true)
+ if err != nil {
+ klog.Errorf("bi.ReconcileBackendPools for service (%s): failed to EnsureBackendPoolDeleted: %s", serviceName, err.Error())
+ return false, false, err
}
var nodeIPAddressesToBeDeleted []string
@@ -439,6 +495,7 @@ func (bi *backendPoolTypeNodeIP) ReconcileBackendPools(clusterName string, servi
if err := bi.CreateOrUpdateLBBackendPool(lbName, bp); err != nil {
return false, false, fmt.Errorf("bi.ReconcileBackendPools for service (%s): lb backendpool - failed to update backend pool %s for load balancer %s: %w", serviceName, lbBackendPoolName, lbName, err)
}
+ shouldRefreshLB = true
}
}
break
@@ -447,6 +504,13 @@ func (bi *backendPoolTypeNodeIP) ReconcileBackendPools(clusterName string, servi
}
}
+ if shouldRefreshLB {
+ lb, _, err = bi.getAzureLoadBalancer(lbName, cache.CacheReadTypeForceRefresh)
+ if err != nil {
+ return false, false, fmt.Errorf("bi.ReconcileBackendPools for service (%s): failed to get load balancer %s: %w", serviceName, lbName, err)
+ }
+ }
+
if !foundBackendPool {
isBackendPoolPreConfigured = newBackendPool(lb, isBackendPoolPreConfigured, bi.PreConfiguredBackendPoolLoadBalancerTypes, getServiceName(service), getBackendPoolName(clusterName, service))
changed = true
@@ -455,6 +519,39 @@ func (bi *backendPoolTypeNodeIP) ReconcileBackendPools(clusterName string, servi
return isBackendPoolPreConfigured, changed, nil
}
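The `shouldRefreshLB` flag introduced above implements a simple pattern: if any pool mutation (backend-pool deletion or update) touched the load balancer, re-read it with a force refresh so later steps see fresh etags and membership. A hedged, self-contained sketch of that pattern; `ensureBackendPoolDeleted` and `getLoadBalancer` here are hypothetical stand-ins for the real VMSet and cache calls:

```go
package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-ins for EnsureBackendPoolDeleted and the cache-backed
// load balancer getter used in ReconcileBackendPools.
func ensureBackendPoolDeleted() (bool, error) { return true, nil }

func getLoadBalancer(name string, forceRefresh bool) (string, error) {
	if name == "" {
		return "", errors.New("load balancer not found")
	}
	return "lb:" + name, nil // pretend this re-read ARM when forceRefresh is true
}

func reconcile(lbName string) error {
	// EnsureBackendPoolDeleted now reports whether it actually mutated anything.
	shouldRefreshLB, err := ensureBackendPoolDeleted()
	if err != nil {
		return err
	}
	if shouldRefreshLB {
		// Force-refresh so subsequent steps work on a fresh copy of the LB
		// instead of a stale cached one.
		if _, err := getLoadBalancer(lbName, true); err != nil {
			return fmt.Errorf("failed to get load balancer %s: %w", lbName, err)
		}
	}
	return nil
}

func main() { fmt.Println(reconcile("kubernetes")) }
```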
+func (bi *backendPoolTypeNodeIP) GetBackendPrivateIPs(clusterName string, service *v1.Service, lb *network.LoadBalancer) ([]string, []string) {
+ serviceName := getServiceName(service)
+ lbBackendPoolName := getBackendPoolName(clusterName, service)
+ if lb.LoadBalancerPropertiesFormat == nil || lb.LoadBalancerPropertiesFormat.BackendAddressPools == nil {
+ return nil, nil
+ }
+
+ backendPrivateIPv4s, backendPrivateIPv6s := sets.NewString(), sets.NewString()
+ for _, bp := range *lb.BackendAddressPools {
+ if strings.EqualFold(to.String(bp.Name), lbBackendPoolName) {
+ klog.V(10).Infof("bi.GetBackendPrivateIPs for service (%s): found wanted backendpool %s", serviceName, to.String(bp.Name))
+ if bp.BackendAddressPoolPropertiesFormat != nil && bp.LoadBalancerBackendAddresses != nil {
+ for _, backendAddress := range *bp.LoadBalancerBackendAddresses {
+ ipAddress := backendAddress.IPAddress
+ if ipAddress != nil {
+ klog.V(2).Infof("bi.GetBackendPrivateIPs for service (%s): lb backendpool - found private IP %q", serviceName, *ipAddress)
+ if utilnet.IsIPv4String(*ipAddress) {
+ backendPrivateIPv4s.Insert(*ipAddress)
+ } else {
+ backendPrivateIPv6s.Insert(*ipAddress)
+ }
+ } else {
+ klog.V(4).Infof("bi.GetBackendPrivateIPs for service (%s): lb backendpool - found nil private IP", serviceName)
+ }
+ }
+ }
+ } else {
+ klog.V(10).Infof("bi.GetBackendPrivateIPs for service (%s): found unmanaged backendpool %s", serviceName, to.String(bp.Name))
+ }
+ }
+ return backendPrivateIPv4s.List(), backendPrivateIPv6s.List()
+}
+
func newBackendPool(lb *network.LoadBalancer, isBackendPoolPreConfigured bool, preConfiguredBackendPoolLoadBalancerTypes, serviceName, lbBackendPoolName string) bool {
if isBackendPoolPreConfigured {
klog.V(2).Infof("newBackendPool for service (%s)(true): lb backendpool - PreConfiguredBackendPoolLoadBalancerTypes %s has been set but can not find corresponding backend pool, ignoring it",
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_managedDiskController.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_managedDiskController.go
index e53eac1b2120..538ed8d8470a 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_managedDiskController.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_managedDiskController.go
@@ -24,7 +24,7 @@ import (
"strconv"
"strings"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/go-autorest/autorest/to"
v1 "k8s.io/api/core/v1"
@@ -37,7 +37,7 @@ import (
"sigs.k8s.io/cloud-provider-azure/pkg/consts"
)
-//ManagedDiskController : managed disk controller struct
+// ManagedDiskController : managed disk controller struct
type ManagedDiskController struct {
common *controllerCommon
}
@@ -84,9 +84,11 @@ type ManagedDiskOptions struct {
BurstingEnabled *bool
// SubscriptionID - specify a different SubscriptionID
SubscriptionID string
+ // Location - specify a different location
+ Location string
}
-//CreateManagedDisk : create managed disk
+// CreateManagedDisk: create managed disk
func (c *ManagedDiskController) CreateManagedDisk(ctx context.Context, options *ManagedDiskOptions) (string, error) {
var err error
klog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB)
@@ -139,7 +141,7 @@ func (c *ManagedDiskController) CreateManagedDisk(ctx context.Context, options *
if options.NetworkAccessPolicy != "" {
diskProperties.NetworkAccessPolicy = options.NetworkAccessPolicy
- if options.NetworkAccessPolicy == compute.NetworkAccessPolicyAllowPrivate {
+ if options.NetworkAccessPolicy == compute.AllowPrivate {
if options.DiskAccessID == nil {
return "", fmt.Errorf("DiskAccessID should not be empty when NetworkAccessPolicy is AllowPrivate")
}
@@ -151,26 +153,34 @@ func (c *ManagedDiskController) CreateManagedDisk(ctx context.Context, options *
}
}
- if diskSku == compute.DiskStorageAccountTypesUltraSSDLRS {
- diskIOPSReadWrite := int64(consts.DefaultDiskIOPSReadWrite)
- if options.DiskIOPSReadWrite != "" {
+ if diskSku == compute.UltraSSDLRS || diskSku == consts.PremiumV2LRS {
+ if options.DiskIOPSReadWrite == "" {
+ if diskSku == compute.UltraSSDLRS {
+ diskIOPSReadWrite := int64(consts.DefaultDiskIOPSReadWrite)
+ diskProperties.DiskIOPSReadWrite = to.Int64Ptr(diskIOPSReadWrite)
+ }
+ } else {
v, err := strconv.Atoi(options.DiskIOPSReadWrite)
if err != nil {
return "", fmt.Errorf("AzureDisk - failed to parse DiskIOPSReadWrite: %w", err)
}
- diskIOPSReadWrite = int64(v)
+ diskIOPSReadWrite := int64(v)
+ diskProperties.DiskIOPSReadWrite = to.Int64Ptr(diskIOPSReadWrite)
}
- diskProperties.DiskIOPSReadWrite = to.Int64Ptr(diskIOPSReadWrite)
- diskMBpsReadWrite := int64(consts.DefaultDiskMBpsReadWrite)
- if options.DiskMBpsReadWrite != "" {
+ if options.DiskMBpsReadWrite == "" {
+ if diskSku == compute.UltraSSDLRS {
+ diskMBpsReadWrite := int64(consts.DefaultDiskMBpsReadWrite)
+ diskProperties.DiskMBpsReadWrite = to.Int64Ptr(diskMBpsReadWrite)
+ }
+ } else {
v, err := strconv.Atoi(options.DiskMBpsReadWrite)
if err != nil {
return "", fmt.Errorf("AzureDisk - failed to parse DiskMBpsReadWrite: %w", err)
}
- diskMBpsReadWrite = int64(v)
+ diskMBpsReadWrite := int64(v)
+ diskProperties.DiskMBpsReadWrite = to.Int64Ptr(diskMBpsReadWrite)
}
- diskProperties.DiskMBpsReadWrite = to.Int64Ptr(diskMBpsReadWrite)
if options.LogicalSectorSize != 0 {
klog.V(2).Infof("AzureDisk - requested LogicalSectorSize: %v", options.LogicalSectorSize)
@@ -211,8 +221,12 @@ func (c *ManagedDiskController) CreateManagedDisk(ctx context.Context, options *
diskProperties.MaxShares = &options.MaxShares
}
+ location := c.common.location
+ if options.Location != "" {
+ location = options.Location
+ }
model := compute.Disk{
- Location: &c.common.location,
+ Location: &location,
Tags: newTags,
Sku: &compute.DiskSku{
Name: diskSku,
@@ -267,7 +281,7 @@ func (c *ManagedDiskController) CreateManagedDisk(ctx context.Context, options *
return diskID, nil
}
-//DeleteManagedDisk : delete managed disk
+// DeleteManagedDisk : delete managed disk
func (c *ManagedDiskController) DeleteManagedDisk(ctx context.Context, diskURI string) error {
resourceGroup, subsID, err := getInfoFromDiskURI(diskURI)
if err != nil {
@@ -349,7 +363,7 @@ func (c *ManagedDiskController) ResizeDisk(ctx context.Context, diskURI string,
return newSizeQuant, nil
}
- if !supportOnlineResize && result.DiskProperties.DiskState != compute.DiskStateUnattached {
+ if !supportOnlineResize && result.DiskProperties.DiskState != compute.Unattached {
return oldSize, fmt.Errorf("azureDisk - disk resize is only supported on Unattached disk, current disk state: %s, already attached to %s", result.DiskProperties.DiskState, to.String(result.ManagedBy))
}
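The reshaped `CreateManagedDisk` branching above changes the defaulting behavior: UltraSSD_LRS still falls back to the default IOPS/MBps when the options are empty, while PremiumV2_LRS leaves the fields unset so the service chooses. A minimal sketch of that logic for the IOPS half; the default value of 500 is an assumption standing in for `consts.DefaultDiskIOPSReadWrite`:

```go
package main

import (
	"fmt"
	"strconv"
)

// Assumed default; the real value lives in consts.DefaultDiskIOPSReadWrite.
const defaultDiskIOPSReadWrite = 500

// diskIOPS mirrors the branching above: UltraSSD_LRS gets a default when the
// option is empty, PremiumV2_LRS leaves the field unset, and an explicit
// value is parsed for either SKU.
func diskIOPS(sku, opt string) (*int64, error) {
	if opt == "" {
		if sku == "UltraSSD_LRS" {
			v := int64(defaultDiskIOPSReadWrite)
			return &v, nil
		}
		return nil, nil // PremiumV2_LRS: let the service pick
	}
	n, err := strconv.Atoi(opt)
	if err != nil {
		return nil, fmt.Errorf("failed to parse DiskIOPSReadWrite: %w", err)
	}
	v := int64(n)
	return &v, nil
}

func main() {
	p, _ := diskIOPS("PremiumV2_LRS", "")
	fmt.Println(p) // <nil>: no default injected for PremiumV2
	p, _ = diskIOPS("UltraSSD_LRS", "")
	fmt.Println(*p) // 500
}
```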
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_loadbalancer_backendpool.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_loadbalancer_backendpool.go
index 7821dd81e11b..9b446ae0b0fd 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_loadbalancer_backendpool.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_loadbalancer_backendpool.go
@@ -92,3 +92,18 @@ func (mr *MockBackendPoolMockRecorder) ReconcileBackendPools(clusterName, servic
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReconcileBackendPools", reflect.TypeOf((*MockBackendPool)(nil).ReconcileBackendPools), clusterName, service, lb)
}
+
+// GetBackendPrivateIPs mocks base method
+func (m *MockBackendPool) GetBackendPrivateIPs(clusterName string, service *v1.Service, lb *network.LoadBalancer) ([]string, []string) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetBackendPrivateIPs", clusterName, service, lb)
+ ret0, _ := ret[0].([]string)
+ ret1, _ := ret[1].([]string)
+ return ret0, ret1
+}
+
+// GetBackendPrivateIPs indicates an expected call of GetBackendPrivateIPs
+func (mr *MockBackendPoolMockRecorder) GetBackendPrivateIPs(clusterName, service, lb interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBackendPrivateIPs", reflect.TypeOf((*MockBackendPool)(nil).GetBackendPrivateIPs), clusterName, service, lb)
+}
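A minimal sketch of how the new `GetBackendPrivateIPs` mock might be exercised in a test in this package; the test name and expected values are illustrative only:

```go
package provider

import (
	"testing"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-08-01/network"
	"github.com/golang/mock/gomock"
	v1 "k8s.io/api/core/v1"
)

func TestGetBackendPrivateIPsMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	bp := NewMockBackendPool(ctrl)
	// Stub the two-slice return: IPv4 addresses first, IPv6 second.
	bp.EXPECT().
		GetBackendPrivateIPs("kubernetes", gomock.Any(), gomock.Any()).
		Return([]string{"10.240.0.4"}, []string{"fd00::4"})

	v4, v6 := bp.GetBackendPrivateIPs("kubernetes", &v1.Service{}, &network.LoadBalancer{})
	if len(v4) != 1 || len(v6) != 1 {
		t.Fatalf("unexpected private IPs: v4=%v v6=%v", v4, v6)
	}
}
```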
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go
index 9af0bf47c183..be51095b08d7 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go
@@ -20,323 +20,343 @@ import (
context "context"
reflect "reflect"
- compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
network "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-08-01/network"
azure "github.com/Azure/go-autorest/autorest/azure"
gomock "github.com/golang/mock/gomock"
v1 "k8s.io/api/core/v1"
types "k8s.io/apimachinery/pkg/types"
cloud_provider "k8s.io/cloud-provider"
+
cache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
)
-// MockVMSet is a mock of VMSet interface
+// MockVMSet is a mock of VMSet interface.
type MockVMSet struct {
ctrl *gomock.Controller
recorder *MockVMSetMockRecorder
}
-// MockVMSetMockRecorder is the mock recorder for MockVMSet
+// MockVMSetMockRecorder is the mock recorder for MockVMSet.
type MockVMSetMockRecorder struct {
mock *MockVMSet
}
-// NewMockVMSet creates a new mock instance
+// NewMockVMSet creates a new mock instance.
func NewMockVMSet(ctrl *gomock.Controller) *MockVMSet {
mock := &MockVMSet{ctrl: ctrl}
mock.recorder = &MockVMSetMockRecorder{mock}
return mock
}
-// EXPECT returns an object that allows the caller to indicate expected use
+// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockVMSet) EXPECT() *MockVMSetMockRecorder {
return m.recorder
}
-// GetInstanceIDByNodeName mocks base method
-func (m *MockVMSet) GetInstanceIDByNodeName(name string) (string, error) {
+// AttachDisk mocks base method.
+func (m *MockVMSet) AttachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) (*azure.Future, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetInstanceIDByNodeName", name)
- ret0, _ := ret[0].(string)
+ ret := m.ctrl.Call(m, "AttachDisk", ctx, nodeName, diskMap)
+ ret0, _ := ret[0].(*azure.Future)
ret1, _ := ret[1].(error)
return ret0, ret1
}
-// GetInstanceIDByNodeName indicates an expected call of GetInstanceIDByNodeName
-func (mr *MockVMSetMockRecorder) GetInstanceIDByNodeName(name interface{}) *gomock.Call {
+// AttachDisk indicates an expected call of AttachDisk.
+func (mr *MockVMSetMockRecorder) AttachDisk(ctx, nodeName, diskMap interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceIDByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetInstanceIDByNodeName), name)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachDisk", reflect.TypeOf((*MockVMSet)(nil).AttachDisk), ctx, nodeName, diskMap)
}
-// GetInstanceTypeByNodeName mocks base method
-func (m *MockVMSet) GetInstanceTypeByNodeName(name string) (string, error) {
+// DeleteCacheForNode mocks base method.
+func (m *MockVMSet) DeleteCacheForNode(nodeName string) error {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetInstanceTypeByNodeName", name)
- ret0, _ := ret[0].(string)
- ret1, _ := ret[1].(error)
- return ret0, ret1
+ ret := m.ctrl.Call(m, "DeleteCacheForNode", nodeName)
+ ret0, _ := ret[0].(error)
+ return ret0
}
-// GetInstanceTypeByNodeName indicates an expected call of GetInstanceTypeByNodeName
-func (mr *MockVMSetMockRecorder) GetInstanceTypeByNodeName(name interface{}) *gomock.Call {
+// DeleteCacheForNode indicates an expected call of DeleteCacheForNode.
+func (mr *MockVMSetMockRecorder) DeleteCacheForNode(nodeName interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceTypeByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetInstanceTypeByNodeName), name)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCacheForNode", reflect.TypeOf((*MockVMSet)(nil).DeleteCacheForNode), nodeName)
}
-// GetIPByNodeName mocks base method
-func (m *MockVMSet) GetIPByNodeName(name string) (string, string, error) {
+// DetachDisk mocks base method.
+func (m *MockVMSet) DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string) error {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetIPByNodeName", name)
- ret0, _ := ret[0].(string)
- ret1, _ := ret[1].(string)
- ret2, _ := ret[2].(error)
- return ret0, ret1, ret2
+ ret := m.ctrl.Call(m, "DetachDisk", ctx, nodeName, diskMap)
+ ret0, _ := ret[0].(error)
+ return ret0
}
-// GetIPByNodeName indicates an expected call of GetIPByNodeName
-func (mr *MockVMSetMockRecorder) GetIPByNodeName(name interface{}) *gomock.Call {
+// DetachDisk indicates an expected call of DetachDisk.
+func (mr *MockVMSetMockRecorder) DetachDisk(ctx, nodeName, diskMap interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIPByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetIPByNodeName), name)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DetachDisk", reflect.TypeOf((*MockVMSet)(nil).DetachDisk), ctx, nodeName, diskMap)
}
-// GetPrimaryInterface mocks base method
-func (m *MockVMSet) GetPrimaryInterface(nodeName string) (network.Interface, error) {
+// EnsureBackendPoolDeleted mocks base method.
+func (m *MockVMSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) (bool, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetPrimaryInterface", nodeName)
- ret0, _ := ret[0].(network.Interface)
+ ret := m.ctrl.Call(m, "EnsureBackendPoolDeleted", service, backendPoolID, vmSetName, backendAddressPools, deleteFromVMSet)
+ ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
-// GetPrimaryInterface indicates an expected call of GetPrimaryInterface
-func (mr *MockVMSetMockRecorder) GetPrimaryInterface(nodeName interface{}) *gomock.Call {
+// EnsureBackendPoolDeleted indicates an expected call of EnsureBackendPoolDeleted.
+func (mr *MockVMSetMockRecorder) EnsureBackendPoolDeleted(service, backendPoolID, vmSetName, backendAddressPools, deleteFromVMSet interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPrimaryInterface", reflect.TypeOf((*MockVMSet)(nil).GetPrimaryInterface), nodeName)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureBackendPoolDeleted", reflect.TypeOf((*MockVMSet)(nil).EnsureBackendPoolDeleted), service, backendPoolID, vmSetName, backendAddressPools, deleteFromVMSet)
}
-// GetNodeNameByProviderID mocks base method
-func (m *MockVMSet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
+// EnsureBackendPoolDeletedFromVMSets mocks base method.
+func (m *MockVMSet) EnsureBackendPoolDeletedFromVMSets(vmSetNamesMap map[string]bool, backendPoolID string) error {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetNodeNameByProviderID", providerID)
- ret0, _ := ret[0].(types.NodeName)
- ret1, _ := ret[1].(error)
- return ret0, ret1
+ ret := m.ctrl.Call(m, "EnsureBackendPoolDeletedFromVMSets", vmSetNamesMap, backendPoolID)
+ ret0, _ := ret[0].(error)
+ return ret0
}
-// GetNodeNameByProviderID indicates an expected call of GetNodeNameByProviderID
-func (mr *MockVMSetMockRecorder) GetNodeNameByProviderID(providerID interface{}) *gomock.Call {
+// EnsureBackendPoolDeletedFromVMSets indicates an expected call of EnsureBackendPoolDeletedFromVMSets.
+func (mr *MockVMSetMockRecorder) EnsureBackendPoolDeletedFromVMSets(vmSetNamesMap, backendPoolID interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeNameByProviderID", reflect.TypeOf((*MockVMSet)(nil).GetNodeNameByProviderID), providerID)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureBackendPoolDeletedFromVMSets", reflect.TypeOf((*MockVMSet)(nil).EnsureBackendPoolDeletedFromVMSets), vmSetNamesMap, backendPoolID)
}
-// GetZoneByNodeName mocks base method
-func (m *MockVMSet) GetZoneByNodeName(name string) (cloud_provider.Zone, error) {
+// EnsureHostInPool mocks base method.
+func (m *MockVMSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID, vmSetName string) (string, string, string, *compute.VirtualMachineScaleSetVM, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetZoneByNodeName", name)
- ret0, _ := ret[0].(cloud_provider.Zone)
- ret1, _ := ret[1].(error)
- return ret0, ret1
+ ret := m.ctrl.Call(m, "EnsureHostInPool", service, nodeName, backendPoolID, vmSetName)
+ ret0, _ := ret[0].(string)
+ ret1, _ := ret[1].(string)
+ ret2, _ := ret[2].(string)
+ ret3, _ := ret[3].(*compute.VirtualMachineScaleSetVM)
+ ret4, _ := ret[4].(error)
+ return ret0, ret1, ret2, ret3, ret4
}
-// GetZoneByNodeName indicates an expected call of GetZoneByNodeName
-func (mr *MockVMSetMockRecorder) GetZoneByNodeName(name interface{}) *gomock.Call {
+// EnsureHostInPool indicates an expected call of EnsureHostInPool.
+func (mr *MockVMSetMockRecorder) EnsureHostInPool(service, nodeName, backendPoolID, vmSetName interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetZoneByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetZoneByNodeName), name)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureHostInPool", reflect.TypeOf((*MockVMSet)(nil).EnsureHostInPool), service, nodeName, backendPoolID, vmSetName)
}
-// GetPrimaryVMSetName mocks base method
-func (m *MockVMSet) GetPrimaryVMSetName() string {
+// EnsureHostsInPool mocks base method.
+func (m *MockVMSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID, vmSetName string) error {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetPrimaryVMSetName")
- ret0, _ := ret[0].(string)
+ ret := m.ctrl.Call(m, "EnsureHostsInPool", service, nodes, backendPoolID, vmSetName)
+ ret0, _ := ret[0].(error)
return ret0
}
-// GetPrimaryVMSetName indicates an expected call of GetPrimaryVMSetName
-func (mr *MockVMSetMockRecorder) GetPrimaryVMSetName() *gomock.Call {
+// EnsureHostsInPool indicates an expected call of EnsureHostsInPool.
+func (mr *MockVMSetMockRecorder) EnsureHostsInPool(service, nodes, backendPoolID, vmSetName interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPrimaryVMSetName", reflect.TypeOf((*MockVMSet)(nil).GetPrimaryVMSetName))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureHostsInPool", reflect.TypeOf((*MockVMSet)(nil).EnsureHostsInPool), service, nodes, backendPoolID, vmSetName)
}
-// GetVMSetNames mocks base method
-func (m *MockVMSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (*[]string, error) {
+// GetAgentPoolVMSetNames mocks base method.
+func (m *MockVMSet) GetAgentPoolVMSetNames(nodes []*v1.Node) (*[]string, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetVMSetNames", service, nodes)
+ ret := m.ctrl.Call(m, "GetAgentPoolVMSetNames", nodes)
ret0, _ := ret[0].(*[]string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
-// GetVMSetNames indicates an expected call of GetVMSetNames
-func (mr *MockVMSetMockRecorder) GetVMSetNames(service, nodes interface{}) *gomock.Call {
+// GetAgentPoolVMSetNames indicates an expected call of GetAgentPoolVMSetNames.
+func (mr *MockVMSetMockRecorder) GetAgentPoolVMSetNames(nodes interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVMSetNames", reflect.TypeOf((*MockVMSet)(nil).GetVMSetNames), service, nodes)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAgentPoolVMSetNames", reflect.TypeOf((*MockVMSet)(nil).GetAgentPoolVMSetNames), nodes)
}
-// GetNodeVMSetName mocks base method
-func (m *MockVMSet) GetNodeVMSetName(node *v1.Node) (string, error) {
+// GetDataDisks mocks base method.
+func (m *MockVMSet) GetDataDisks(nodeName types.NodeName, crt cache.AzureCacheReadType) ([]compute.DataDisk, *string, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetNodeVMSetName", node)
+ ret := m.ctrl.Call(m, "GetDataDisks", nodeName, crt)
+ ret0, _ := ret[0].([]compute.DataDisk)
+ ret1, _ := ret[1].(*string)
+ ret2, _ := ret[2].(error)
+ return ret0, ret1, ret2
+}
+
+// GetDataDisks indicates an expected call of GetDataDisks.
+func (mr *MockVMSetMockRecorder) GetDataDisks(nodeName, crt interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataDisks", reflect.TypeOf((*MockVMSet)(nil).GetDataDisks), nodeName, crt)
+}
+
+// GetIPByNodeName mocks base method.
+func (m *MockVMSet) GetIPByNodeName(name string) (string, string, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetIPByNodeName", name)
ret0, _ := ret[0].(string)
- ret1, _ := ret[1].(error)
- return ret0, ret1
+ ret1, _ := ret[1].(string)
+ ret2, _ := ret[2].(error)
+ return ret0, ret1, ret2
}
-// GetNodeVMSetName indicates an expected call of GetNodeVMSetName
-func (mr *MockVMSetMockRecorder) GetNodeVMSetName(node interface{}) *gomock.Call {
+// GetIPByNodeName indicates an expected call of GetIPByNodeName.
+func (mr *MockVMSetMockRecorder) GetIPByNodeName(name interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeVMSetName", reflect.TypeOf((*MockVMSet)(nil).GetNodeVMSetName), node)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIPByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetIPByNodeName), name)
}
-// EnsureHostsInPool mocks base method
-func (m *MockVMSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID, vmSetName string) error {
+// GetInstanceIDByNodeName mocks base method.
+func (m *MockVMSet) GetInstanceIDByNodeName(name string) (string, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "EnsureHostsInPool", service, nodes, backendPoolID, vmSetName)
- ret0, _ := ret[0].(error)
- return ret0
+ ret := m.ctrl.Call(m, "GetInstanceIDByNodeName", name)
+ ret0, _ := ret[0].(string)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
}
-// EnsureHostsInPool indicates an expected call of EnsureHostsInPool
-func (mr *MockVMSetMockRecorder) EnsureHostsInPool(service, nodes, backendPoolID, vmSetName interface{}) *gomock.Call {
+// GetInstanceIDByNodeName indicates an expected call of GetInstanceIDByNodeName.
+func (mr *MockVMSetMockRecorder) GetInstanceIDByNodeName(name interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureHostsInPool", reflect.TypeOf((*MockVMSet)(nil).EnsureHostsInPool), service, nodes, backendPoolID, vmSetName)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceIDByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetInstanceIDByNodeName), name)
}
-// EnsureHostInPool mocks base method
-func (m *MockVMSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID, vmSetName string) (string, string, string, *compute.VirtualMachineScaleSetVM, error) {
+// GetInstanceTypeByNodeName mocks base method.
+func (m *MockVMSet) GetInstanceTypeByNodeName(name string) (string, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "EnsureHostInPool", service, nodeName, backendPoolID, vmSetName)
+ ret := m.ctrl.Call(m, "GetInstanceTypeByNodeName", name)
ret0, _ := ret[0].(string)
- ret1, _ := ret[1].(string)
- ret2, _ := ret[2].(string)
- ret3, _ := ret[3].(*compute.VirtualMachineScaleSetVM)
- ret4, _ := ret[4].(error)
- return ret0, ret1, ret2, ret3, ret4
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
}
-// EnsureHostInPool indicates an expected call of EnsureHostInPool
-func (mr *MockVMSetMockRecorder) EnsureHostInPool(service, nodeName, backendPoolID, vmSetName interface{}) *gomock.Call {
+// GetInstanceTypeByNodeName indicates an expected call of GetInstanceTypeByNodeName.
+func (mr *MockVMSetMockRecorder) GetInstanceTypeByNodeName(name interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureHostInPool", reflect.TypeOf((*MockVMSet)(nil).EnsureHostInPool), service, nodeName, backendPoolID, vmSetName)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceTypeByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetInstanceTypeByNodeName), name)
}
-// EnsureBackendPoolDeleted mocks base method
-func (m *MockVMSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error {
+// GetNodeCIDRMasksByProviderID mocks base method.
+func (m *MockVMSet) GetNodeCIDRMasksByProviderID(providerID string) (int, int, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "EnsureBackendPoolDeleted", service, backendPoolID, vmSetName, backendAddressPools, deleteFromVMSet)
- ret0, _ := ret[0].(error)
- return ret0
+ ret := m.ctrl.Call(m, "GetNodeCIDRMasksByProviderID", providerID)
+ ret0, _ := ret[0].(int)
+ ret1, _ := ret[1].(int)
+ ret2, _ := ret[2].(error)
+ return ret0, ret1, ret2
}
-// EnsureBackendPoolDeleted indicates an expected call of EnsureBackendPoolDeleted
-func (mr *MockVMSetMockRecorder) EnsureBackendPoolDeleted(service, backendPoolID, vmSetName, backendAddressPools, deleteFromVMSet interface{}) *gomock.Call {
+// GetNodeCIDRMasksByProviderID indicates an expected call of GetNodeCIDRMasksByProviderID.
+func (mr *MockVMSetMockRecorder) GetNodeCIDRMasksByProviderID(providerID interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureBackendPoolDeleted", reflect.TypeOf((*MockVMSet)(nil).EnsureBackendPoolDeleted), service, backendPoolID, vmSetName, backendAddressPools, deleteFromVMSet)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeCIDRMasksByProviderID", reflect.TypeOf((*MockVMSet)(nil).GetNodeCIDRMasksByProviderID), providerID)
}
-// EnsureBackendPoolDeletedFromVMSets mocks base method
-func (m *MockVMSet) EnsureBackendPoolDeletedFromVMSets(vmSetNamesMap map[string]bool, backendPoolID string) error {
+// GetNodeNameByIPConfigurationID mocks base method.
+func (m *MockVMSet) GetNodeNameByIPConfigurationID(ipConfigurationID string) (string, string, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "EnsureBackendPoolDeletedFromVMSets", vmSetNamesMap, backendPoolID)
- ret0, _ := ret[0].(error)
- return ret0
+ ret := m.ctrl.Call(m, "GetNodeNameByIPConfigurationID", ipConfigurationID)
+ ret0, _ := ret[0].(string)
+ ret1, _ := ret[1].(string)
+ ret2, _ := ret[2].(error)
+ return ret0, ret1, ret2
}
-// EnsureBackendPoolDeletedFromVMSets indicates an expected call of EnsureBackendPoolDeletedFromVMSets
-func (mr *MockVMSetMockRecorder) EnsureBackendPoolDeletedFromVMSets(vmSetNamesMap, backendPoolID interface{}) *gomock.Call {
+// GetNodeNameByIPConfigurationID indicates an expected call of GetNodeNameByIPConfigurationID.
+func (mr *MockVMSetMockRecorder) GetNodeNameByIPConfigurationID(ipConfigurationID interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureBackendPoolDeletedFromVMSets", reflect.TypeOf((*MockVMSet)(nil).EnsureBackendPoolDeletedFromVMSets), vmSetNamesMap, backendPoolID)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeNameByIPConfigurationID", reflect.TypeOf((*MockVMSet)(nil).GetNodeNameByIPConfigurationID), ipConfigurationID)
}
-// AttachDisk mocks base method
-func (m *MockVMSet) AttachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) (*azure.Future, error) {
+// GetNodeNameByProviderID mocks base method.
+func (m *MockVMSet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "AttachDisk", nodeName, diskMap)
- ret0, _ := ret[0].(*azure.Future)
+ ret := m.ctrl.Call(m, "GetNodeNameByProviderID", providerID)
+ ret0, _ := ret[0].(types.NodeName)
ret1, _ := ret[1].(error)
return ret0, ret1
}
-// AttachDisk indicates an expected call of AttachDisk
-func (mr *MockVMSetMockRecorder) AttachDisk(ctx, nodeName, diskMap interface{}) *gomock.Call {
+// GetNodeNameByProviderID indicates an expected call of GetNodeNameByProviderID.
+func (mr *MockVMSetMockRecorder) GetNodeNameByProviderID(providerID interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachDisk", reflect.TypeOf((*MockVMSet)(nil).AttachDisk), nodeName, diskMap)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeNameByProviderID", reflect.TypeOf((*MockVMSet)(nil).GetNodeNameByProviderID), providerID)
}
-// DetachDisk mocks base method
-func (m *MockVMSet) DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string) error {
+// GetNodeVMSetName mocks base method.
+func (m *MockVMSet) GetNodeVMSetName(node *v1.Node) (string, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "DetachDisk", nodeName, diskMap)
- ret0, _ := ret[0].(error)
- return ret0
+ ret := m.ctrl.Call(m, "GetNodeVMSetName", node)
+ ret0, _ := ret[0].(string)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
}
-// DetachDisk indicates an expected call of DetachDisk
-func (mr *MockVMSetMockRecorder) DetachDisk(ctx, nodeName, diskMap interface{}) *gomock.Call {
+// GetNodeVMSetName indicates an expected call of GetNodeVMSetName.
+func (mr *MockVMSetMockRecorder) GetNodeVMSetName(node interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DetachDisk", reflect.TypeOf((*MockVMSet)(nil).DetachDisk), nodeName, diskMap)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeVMSetName", reflect.TypeOf((*MockVMSet)(nil).GetNodeVMSetName), node)
}
-// WaitForUpdateResult mocks base method
-func (m *MockVMSet) WaitForUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName, source string) error {
+// GetPowerStatusByNodeName mocks base method.
+func (m *MockVMSet) GetPowerStatusByNodeName(name string) (string, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "WaitForUpdateResult", ctx, future, resourceGroupName, source)
- ret0, _ := ret[0].(error)
- return ret0
+ ret := m.ctrl.Call(m, "GetPowerStatusByNodeName", name)
+ ret0, _ := ret[0].(string)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
}
-// WaitForUpdateResult indicates an expected call of WaitForUpdateResult
-func (mr *MockVMSetMockRecorder) WaitForUpdateResult(ctx, future, resourceGroupName, source interface{}) *gomock.Call {
+// GetPowerStatusByNodeName indicates an expected call of GetPowerStatusByNodeName.
+func (mr *MockVMSetMockRecorder) GetPowerStatusByNodeName(name interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForUpdateResult", reflect.TypeOf((*MockVMSet)(nil).WaitForUpdateResult), ctx, future, resourceGroupName, source)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPowerStatusByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetPowerStatusByNodeName), name)
}
-// GetDataDisks mocks base method
-func (m *MockVMSet) GetDataDisks(nodeName types.NodeName, crt cache.AzureCacheReadType) ([]compute.DataDisk, *string, error) {
+// GetPrimaryInterface mocks base method.
+func (m *MockVMSet) GetPrimaryInterface(nodeName string) (network.Interface, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetDataDisks", nodeName, crt)
- ret0, _ := ret[0].([]compute.DataDisk)
- ret1, _ := ret[1].(*string)
- ret2, _ := ret[2].(error)
- return ret0, ret1, ret2
+ ret := m.ctrl.Call(m, "GetPrimaryInterface", nodeName)
+ ret0, _ := ret[0].(network.Interface)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
}
-// GetDataDisks indicates an expected call of GetDataDisks
-func (mr *MockVMSetMockRecorder) GetDataDisks(nodeName, crt interface{}) *gomock.Call {
+// GetPrimaryInterface indicates an expected call of GetPrimaryInterface.
+func (mr *MockVMSetMockRecorder) GetPrimaryInterface(nodeName interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataDisks", reflect.TypeOf((*MockVMSet)(nil).GetDataDisks), nodeName, crt)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPrimaryInterface", reflect.TypeOf((*MockVMSet)(nil).GetPrimaryInterface), nodeName)
}
-// UpdateVM mocks base method
-func (m *MockVMSet) UpdateVM(ctx context.Context, nodeName types.NodeName) error {
+// GetPrimaryVMSetName mocks base method.
+func (m *MockVMSet) GetPrimaryVMSetName() string {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "UpdateVM", nodeName)
- ret0, _ := ret[0].(error)
+ ret := m.ctrl.Call(m, "GetPrimaryVMSetName")
+ ret0, _ := ret[0].(string)
return ret0
}
-// UpdateVM indicates an expected call of UpdateVM
-func (mr *MockVMSetMockRecorder) UpdateVM(ctx, nodeName interface{}) *gomock.Call {
+// GetPrimaryVMSetName indicates an expected call of GetPrimaryVMSetName.
+func (mr *MockVMSetMockRecorder) GetPrimaryVMSetName() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateVM", reflect.TypeOf((*MockVMSet)(nil).UpdateVM), nodeName)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPrimaryVMSetName", reflect.TypeOf((*MockVMSet)(nil).GetPrimaryVMSetName))
}
-// GetPowerStatusByNodeName mocks base method
-func (m *MockVMSet) GetPowerStatusByNodeName(name string) (string, error) {
+// GetPrivateIPsByNodeName mocks base method.
+func (m *MockVMSet) GetPrivateIPsByNodeName(name string) ([]string, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetPowerStatusByNodeName", name)
- ret0, _ := ret[0].(string)
+ ret := m.ctrl.Call(m, "GetPrivateIPsByNodeName", name)
+ ret0, _ := ret[0].([]string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
-// GetPowerStatusByNodeName indicates an expected call of GetPowerStatusByNodeName
-func (mr *MockVMSetMockRecorder) GetPowerStatusByNodeName(name interface{}) *gomock.Call {
+// GetPrivateIPsByNodeName indicates an expected call of GetPrivateIPsByNodeName.
+func (mr *MockVMSetMockRecorder) GetPrivateIPsByNodeName(name interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPowerStatusByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetPowerStatusByNodeName), name)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPrivateIPsByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetPrivateIPsByNodeName), name)
}
-// GetProvisioningStateByNodeName mocks base method
+// GetProvisioningStateByNodeName mocks base method.
func (m *MockVMSet) GetProvisioningStateByNodeName(name string) (string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetProvisioningStateByNodeName", name)
@@ -345,70 +365,66 @@ func (m *MockVMSet) GetProvisioningStateByNodeName(name string) (string, error)
return ret0, ret1
}
-// GetProvisioningStateByNodeName indicates an expected call of GetProvisioningStateByNodeName
+// GetProvisioningStateByNodeName indicates an expected call of GetProvisioningStateByNodeName.
func (mr *MockVMSetMockRecorder) GetProvisioningStateByNodeName(name interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProvisioningStateByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetProvisioningStateByNodeName), name)
}
-// GetPrivateIPsByNodeName mocks base method
-func (m *MockVMSet) GetPrivateIPsByNodeName(name string) ([]string, error) {
+// GetVMSetNames mocks base method.
+func (m *MockVMSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (*[]string, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetPrivateIPsByNodeName", name)
- ret0, _ := ret[0].([]string)
+ ret := m.ctrl.Call(m, "GetVMSetNames", service, nodes)
+ ret0, _ := ret[0].(*[]string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
-// GetPrivateIPsByNodeName indicates an expected call of GetPrivateIPsByNodeName
-func (mr *MockVMSetMockRecorder) GetPrivateIPsByNodeName(name interface{}) *gomock.Call {
+// GetVMSetNames indicates an expected call of GetVMSetNames.
+func (mr *MockVMSetMockRecorder) GetVMSetNames(service, nodes interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPrivateIPsByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetPrivateIPsByNodeName), name)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVMSetNames", reflect.TypeOf((*MockVMSet)(nil).GetVMSetNames), service, nodes)
}
-// GetNodeNameByIPConfigurationID mocks base method
-func (m *MockVMSet) GetNodeNameByIPConfigurationID(ipConfigurationID string) (string, string, error) {
+// GetZoneByNodeName mocks base method.
+func (m *MockVMSet) GetZoneByNodeName(name string) (cloud_provider.Zone, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetNodeNameByIPConfigurationID", ipConfigurationID)
- ret0, _ := ret[0].(string)
- ret1, _ := ret[1].(string)
- ret2, _ := ret[2].(error)
- return ret0, ret1, ret2
+ ret := m.ctrl.Call(m, "GetZoneByNodeName", name)
+ ret0, _ := ret[0].(cloud_provider.Zone)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
}
-// GetNodeNameByIPConfigurationID indicates an expected call of GetNodeNameByIPConfigurationID
-func (mr *MockVMSetMockRecorder) GetNodeNameByIPConfigurationID(ipConfigurationID interface{}) *gomock.Call {
+// GetZoneByNodeName indicates an expected call of GetZoneByNodeName.
+func (mr *MockVMSetMockRecorder) GetZoneByNodeName(name interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeNameByIPConfigurationID", reflect.TypeOf((*MockVMSet)(nil).GetNodeNameByIPConfigurationID), ipConfigurationID)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetZoneByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetZoneByNodeName), name)
}
-// GetNodeCIDRMasksByProviderID mocks base method
-func (m *MockVMSet) GetNodeCIDRMasksByProviderID(providerID string) (int, int, error) {
+// UpdateVM mocks base method.
+func (m *MockVMSet) UpdateVM(ctx context.Context, nodeName types.NodeName) error {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetNodeCIDRMasksByProviderID", providerID)
- ret0, _ := ret[0].(int)
- ret1, _ := ret[1].(int)
- ret2, _ := ret[2].(error)
- return ret0, ret1, ret2
+ ret := m.ctrl.Call(m, "UpdateVM", ctx, nodeName)
+ ret0, _ := ret[0].(error)
+ return ret0
}
-// GetNodeCIDRMasksByProviderID indicates an expected call of GetNodeCIDRMasksByProviderID
-func (mr *MockVMSetMockRecorder) GetNodeCIDRMasksByProviderID(providerID interface{}) *gomock.Call {
+// UpdateVM indicates an expected call of UpdateVM.
+func (mr *MockVMSetMockRecorder) UpdateVM(ctx, nodeName interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeCIDRMasksByProviderID", reflect.TypeOf((*MockVMSet)(nil).GetNodeCIDRMasksByProviderID), providerID)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateVM", reflect.TypeOf((*MockVMSet)(nil).UpdateVM), ctx, nodeName)
}
-// GetAgentPoolVMSetNames mocks base method
-func (m *MockVMSet) GetAgentPoolVMSetNames(nodes []*v1.Node) (*[]string, error) {
+// WaitForUpdateResult mocks base method.
+func (m *MockVMSet) WaitForUpdateResult(ctx context.Context, future *azure.Future, nodeName types.NodeName, source string) error {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetAgentPoolVMSetNames", nodes)
- ret0, _ := ret[0].(*[]string)
- ret1, _ := ret[1].(error)
- return ret0, ret1
+ ret := m.ctrl.Call(m, "WaitForUpdateResult", ctx, future, source)
+ ret0, _ := ret[0].(error)
+ return ret0
}
-// GetAgentPoolVMSetNames indicates an expected call of GetAgentPoolVMSetNames
-func (mr *MockVMSetMockRecorder) GetAgentPoolVMSetNames(nodes interface{}) *gomock.Call {
+// WaitForUpdateResult indicates an expected call of WaitForUpdateResult.
+func (mr *MockVMSetMockRecorder) WaitForUpdateResult(ctx, future, nodeName, source interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAgentPoolVMSetNames", reflect.TypeOf((*MockVMSet)(nil).GetAgentPoolVMSetNames), nodes)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForUpdateResult", reflect.TypeOf((*MockVMSet)(nil).WaitForUpdateResult), ctx, future, source)
}
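Since `EnsureBackendPoolDeleted` now returns `(bool, error)`, test expectations stub two values. A short, assumed-names sketch of setting that up against the regenerated `MockVMSet`:

```go
package provider

import (
	"testing"

	"github.com/golang/mock/gomock"
	v1 "k8s.io/api/core/v1"
)

func TestEnsureBackendPoolDeletedMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	vmSet := NewMockVMSet(ctrl)
	// The first return value now reports whether any NIC was actually updated,
	// which callers use to decide whether to refresh the LB cache.
	vmSet.EXPECT().
		EnsureBackendPoolDeleted(gomock.Any(), "pool-id", "agentpool1", gomock.Nil(), true).
		Return(true, nil)

	updated, err := vmSet.EnsureBackendPoolDeleted(&v1.Service{}, "pool-id", "agentpool1", nil, true)
	if err != nil || !updated {
		t.Fatalf("want updated=true err=nil, got updated=%v err=%v", updated, err)
	}
}
```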
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_privatelinkservice.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_privatelinkservice.go
index a4a6d4f9ebce..43c356a6f8ae 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_privatelinkservice.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_privatelinkservice.go
@@ -47,8 +47,8 @@ func (az *Cloud) reconcilePrivateLinkService(
if createPLS {
// Firstly, make sure it's internal service
- if !requiresInternalLoadBalancer(service) {
- return fmt.Errorf("reconcilePrivateLinkService for service(%s): service requiring private link service must be internal", serviceName)
+ if !requiresInternalLoadBalancer(service) && !consts.IsK8sServiceDisableLoadBalancerFloatingIP(service) {
+ return fmt.Errorf("reconcilePrivateLinkService for service(%s): service requiring private link service must be internal or disable floating ip", serviceName)
}
// Secondly, check if there is a private link service already created
@@ -518,8 +518,10 @@ func getPLSSubnetName(service *v1.Service) *string {
return &l
}
- if l, found := service.Annotations[consts.ServiceAnnotationLoadBalancerInternalSubnet]; found && strings.TrimSpace(l) != "" {
- return &l
+ if requiresInternalLoadBalancer(service) {
+ if l, found := service.Annotations[consts.ServiceAnnotationLoadBalancerInternalSubnet]; found && strings.TrimSpace(l) != "" {
+ return &l
+ }
}
return nil
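The `getPLSSubnetName` change above narrows the lookup order: an explicit PLS subnet annotation always wins, and the internal-LB subnet annotation is only honored for internal services, so an external service with floating IP disabled no longer inherits it. A hedged sketch of that order; the annotation strings are assumptions standing in for the `consts` values:

```go
package main

import (
	"fmt"
	"strings"
)

// Assumed annotation keys; the real constants live in
// sigs.k8s.io/cloud-provider-azure/pkg/consts.
const (
	annPLSSubnet      = "service.beta.kubernetes.io/azure-pls-ip-configuration-subnet"
	annInternalSubnet = "service.beta.kubernetes.io/azure-load-balancer-internal-subnet"
)

// plsSubnetName mirrors the new lookup order described above.
func plsSubnetName(annotations map[string]string, internal bool) *string {
	if v, ok := annotations[annPLSSubnet]; ok && strings.TrimSpace(v) != "" {
		return &v
	}
	if internal {
		if v, ok := annotations[annInternalSubnet]; ok && strings.TrimSpace(v) != "" {
			return &v
		}
	}
	return nil
}

func main() {
	got := plsSubnetName(map[string]string{annInternalSubnet: "subnet-a"}, false)
	fmt.Println(got) // <nil>: external services ignore the internal-subnet annotation
}
```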
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_routes.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_routes.go
index d3d768b02e20..63e924616bd6 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_routes.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_routes.go
@@ -70,8 +70,8 @@ func (op *delayedRouteOperation) wait() error {
// delayedRouteUpdater defines a delayed route updater, which batches all the
// route updating operations within "interval" period.
// Example usage:
-// op, err := updater.addRouteOperation(routeOperationAdd, route)
-// err = op.wait()
+// op, err := updater.addRouteOperation(routeOperationAdd, route)
+// err = op.wait()
type delayedRouteUpdater struct {
az *Cloud
interval time.Duration
@@ -369,7 +369,7 @@ func (az *Cloud) createRouteTable() error {
// route.Name will be ignored, although the cloud-provider may use nameHint
// to create a more user-meaningful name.
func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, kubeRoute *cloudprovider.Route) error {
- mc := metrics.NewMetricContext("routes", "create_route", az.ResourceGroup, az.SubscriptionID, string(kubeRoute.TargetNode))
+ mc := metrics.NewMetricContext("routes", "create_route", az.ResourceGroup, az.getNetworkResourceSubscriptionID(), string(kubeRoute.TargetNode))
isOperationSucceeded := false
defer func() {
mc.ObserveOperationWithResult(isOperationSucceeded)
@@ -448,7 +448,7 @@ func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint s
// DeleteRoute deletes the specified managed route
// Route should be as returned by ListRoutes
func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute *cloudprovider.Route) error {
- mc := metrics.NewMetricContext("routes", "delete_route", az.ResourceGroup, az.SubscriptionID, string(kubeRoute.TargetNode))
+ mc := metrics.NewMetricContext("routes", "delete_route", az.ResourceGroup, az.getNetworkResourceSubscriptionID(), string(kubeRoute.TargetNode))
isOperationSucceeded := false
defer func() {
mc.ObserveOperationWithResult(isOperationSucceeded)
@@ -552,7 +552,7 @@ func findFirstIPByFamily(ips []string, v6 bool) (string, error) {
return "", fmt.Errorf("no match found matching the ipfamily requested")
}
-//strips : . /
+// strips : . /
func cidrtoRfc1035(cidr string) string {
cidr = strings.ReplaceAll(cidr, ":", "")
cidr = strings.ReplaceAll(cidr, ".", "")
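Per its comment, `cidrtoRfc1035` strips `:`, `.`, and `/` so a CIDR can be embedded in an RFC 1035-style resource name; the `/` replacement falls outside the hunk context. A small sketch of the same sanitizer with example output:

```go
package main

import (
	"fmt"
	"strings"
)

// cidrToRfc1035 approximates the unexported helper above; the "/" strip is
// assumed from the function's own comment since it is elided from the hunk.
func cidrToRfc1035(cidr string) string {
	for _, ch := range []string{":", ".", "/"} {
		cidr = strings.ReplaceAll(cidr, ch, "")
	}
	return cidr
}

func main() {
	fmt.Println(cidrToRfc1035("10.244.0.0/24")) // 102440024
	fmt.Println(cidrToRfc1035("fd00::/64"))     // fd0064
}
```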
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go
index 66b40f33658c..53cd282fbf39 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go
@@ -30,7 +30,7 @@ import (
"sigs.k8s.io/cloud-provider-azure/pkg/consts"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-08-01/network"
"github.com/Azure/go-autorest/autorest/to"
@@ -56,15 +56,6 @@ var (
vmasIDRE = regexp.MustCompile(`/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/availabilitySets/(.+)`)
)
-// getStandardMachineID returns the full identifier of a virtual machine.
-func (az *Cloud) getStandardMachineID(subscriptionID, resourceGroup, machineName string) string {
- return fmt.Sprintf(
- consts.MachineIDTemplate,
- subscriptionID,
- strings.ToLower(resourceGroup),
- machineName)
-}
-
// returns the full identifier of an availabilitySet
func (az *Cloud) getAvailabilitySetID(resourceGroup, availabilitySetName string) string {
return fmt.Sprintf(
@@ -106,7 +97,7 @@ func (az *Cloud) getLoadBalancerProbeID(lbName, rgName, lbRuleName string) strin
// getNetworkResourceSubscriptionID returns the subscription id which hosts network resources
func (az *Cloud) getNetworkResourceSubscriptionID() string {
- if az.Config.UsesNetworkResourceInDifferentTenantOrSubscription() {
+ if az.Config.UsesNetworkResourceInDifferentSubscription() {
return az.NetworkResourceSubscriptionID
}
return az.SubscriptionID
@@ -265,8 +256,8 @@ func isInternalLoadBalancer(lb *network.LoadBalancer) bool {
// SingleStack -v4 (pre v1.16) => BackendPool name == clusterName
// SingleStack -v6 => BackendPool name == -IPv6 (all cluster bootstrap uses this name)
// DualStack
-// => IPv4 BackendPool name == clusterName
-// => IPv6 BackendPool name == -IPv6
+// => IPv4 BackendPool name == clusterName
+// => IPv6 BackendPool name == -IPv6
// This means:
// clusters moving from IPv4 to dualstack will require no changes
// clusters moving from IPv6 to dualstack will require no changes as the IPv4 backend pool will created with
@@ -352,7 +343,7 @@ func (az *Cloud) serviceOwnsFrontendIP(fip network.FrontendIPConfiguration, serv
return true, isPrimaryService, nil
}
- loadBalancerIP := service.Spec.LoadBalancerIP
+ loadBalancerIP := getServiceLoadBalancerIP(service)
if loadBalancerIP == "" {
// it is a must that the secondary services set the loadBalancer IP
return false, isPrimaryService, nil
@@ -434,7 +425,7 @@ outer:
var polyTable = crc32.MakeTable(crc32.Koopman)
-//MakeCRC32 : convert string to CRC32 format
+// MakeCRC32 : convert string to CRC32 format
func MakeCRC32(str string) string {
crc := crc32.New(polyTable)
_, _ = crc.Write([]byte(str))
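`MakeCRC32` hashes a string with the Koopman CRC-32 polynomial, which this package uses to derive short, stable names. A runnable sketch; the decimal formatting of the checksum is assumed since the tail of the function sits outside the hunk:

```go
package main

import (
	"fmt"
	"hash/crc32"
)

var polyTable = crc32.MakeTable(crc32.Koopman)

// makeCRC32 mirrors MakeCRC32 above: hash the string and render the
// checksum as a decimal string (formatting assumed).
func makeCRC32(s string) string {
	crc := crc32.New(polyTable)
	_, _ = crc.Write([]byte(s))
	return fmt.Sprintf("%d", crc.Sum32())
}

func main() {
	fmt.Println(makeCRC32("kubernetes"))
}
```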
@@ -449,9 +440,9 @@ type availabilitySet struct {
vmasCache *azcache.TimedCache
}
-type availabilitySetEntry struct {
- vmas *compute.AvailabilitySet
- resourceGroup string
+type AvailabilitySetEntry struct {
+ VMAS *compute.AvailabilitySet
+ ResourceGroup string
}
func (as *availabilitySet) newVMASCache() (*azcache.TimedCache, error) {
@@ -476,9 +467,9 @@ func (as *availabilitySet) newVMASCache() (*azcache.TimedCache, error) {
klog.Warning("failed to get the name of the VMAS")
continue
}
- localCache.Store(to.String(vmas.Name), &availabilitySetEntry{
- vmas: &vmas,
- resourceGroup: resourceGroup,
+ localCache.Store(to.String(vmas.Name), &AvailabilitySetEntry{
+ VMAS: &vmas,
+ ResourceGroup: resourceGroup,
})
}
}
@@ -534,9 +525,9 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error)
}
resourceID := *machine.ID
- convertedResourceID, err := convertResourceGroupNameToLower(resourceID)
+ convertedResourceID, err := ConvertResourceGroupNameToLower(resourceID)
if err != nil {
- klog.Errorf("convertResourceGroupNameToLower failed with error: %v", err)
+ klog.Errorf("ConvertResourceGroupNameToLower failed with error: %v", err)
return "", err
}
return convertedResourceID, nil
@@ -1048,10 +1039,10 @@ func (as *availabilitySet) EnsureHostsInPool(service *v1.Service, nodes []*v1.No
}
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
-func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error {
+func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) (bool, error) {
// Returns nil if backend address pools already deleted.
if backendAddressPools == nil {
- return nil
+ return false, nil
}
mc := metrics.NewMetricContext("services", "vmas_ensure_backend_pool_deleted", as.ResourceGroup, as.SubscriptionID, getServiceName(service))
@@ -1076,6 +1067,7 @@ func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backend
}
nicUpdaters := make([]func() error, 0)
allErrs := make([]error, 0)
+ var nicUpdated bool
for i := range ipConfigurationIDs {
ipConfigurationID := ipConfigurationIDs[i]
nodeName, _, err := as.GetNodeNameByIPConfigurationID(ipConfigurationID)
@@ -1093,15 +1085,15 @@ func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backend
if err != nil {
if errors.Is(err, errNotInVMSet) {
klog.V(3).Infof("EnsureBackendPoolDeleted skips node %s because it is not in the vmSet %s", nodeName, vmSetName)
- return nil
+ return false, nil
}
klog.Errorf("error: az.EnsureBackendPoolDeleted(%s), az.VMSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err)
- return err
+ return false, err
}
vmasName, err := getAvailabilitySetNameByID(vmasID)
if err != nil {
- return fmt.Errorf("EnsureBackendPoolDeleted: failed to parse the VMAS ID %s: %w", vmasID, err)
+ return false, fmt.Errorf("EnsureBackendPoolDeleted: failed to parse the VMAS ID %s: %w", vmasID, err)
}
// Only remove nodes belonging to specified vmSet to basic LB backends.
if !strings.EqualFold(vmasName, vmSetName) {
@@ -1111,7 +1103,7 @@ func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backend
if nic.ProvisioningState == consts.NicFailedState {
klog.Warningf("EnsureBackendPoolDeleted skips node %s because its primary nic %s is in Failed state", nodeName, *nic.Name)
- return nil
+ return false, nil
}
if nic.InterfacePropertiesFormat != nil && nic.InterfacePropertiesFormat.IPConfigurations != nil {
@@ -1143,21 +1135,22 @@ func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backend
klog.Errorf("EnsureBackendPoolDeleted CreateOrUpdate for NIC(%s, %s) failed with error %v", as.resourceGroup, to.String(nic.Name), rerr.Error())
return rerr.Error()
}
+ nicUpdated = true
return nil
})
}
}
errs := utilerrors.AggregateGoroutines(nicUpdaters...)
if errs != nil {
- return utilerrors.Flatten(errs)
+ return nicUpdated, utilerrors.Flatten(errs)
}
// Fail if there are other errors.
if len(allErrs) > 0 {
- return utilerrors.Flatten(utilerrors.NewAggregate(allErrs))
+ return nicUpdated, utilerrors.Flatten(utilerrors.NewAggregate(allErrs))
}
isOperationSucceeded = true
- return nil
+ return nicUpdated, nil
}
func getAvailabilitySetNameByID(asID string) (string, error) {
@@ -1230,7 +1223,7 @@ func (as *availabilitySet) GetNodeNameByIPConfigurationID(ipConfigurationID stri
asName, err := getAvailabilitySetNameByID(asID)
if err != nil {
- return "", "", fmt.Errorf("cannot get the availability set name by the availability set ID %s: %v", asID, err)
+ return "", "", fmt.Errorf("cannot get the availability set name by the availability set ID %s: %w", asID, err)
}
return vmName, strings.ToLower(asName), nil
}
@@ -1249,8 +1242,8 @@ func (as *availabilitySet) getAvailabilitySetByNodeName(nodeName string, crt azc
var result *compute.AvailabilitySet
vmasList.Range(func(_, value interface{}) bool {
- vmasEntry := value.(*availabilitySetEntry)
- vmas := vmasEntry.vmas
+ vmasEntry := value.(*AvailabilitySetEntry)
+ vmas := vmasEntry.VMAS
if vmas != nil && vmas.AvailabilitySetProperties != nil && vmas.VirtualMachines != nil {
for _, vmIDRef := range *vmas.VirtualMachines {
if vmIDRef.ID != nil {
@@ -1316,7 +1309,7 @@ func (as *availabilitySet) GetNodeCIDRMasksByProviderID(providerID string) (int,
return ipv4Mask, ipv6Mask, nil
}
-//EnsureBackendPoolDeletedFromVMSets ensures the loadBalancer backendAddressPools deleted from the specified VMAS
+// EnsureBackendPoolDeletedFromVMSets ensures the loadBalancer backendAddressPools deleted from the specified VMAS
func (as *availabilitySet) EnsureBackendPoolDeletedFromVMSets(vmasNamesMap map[string]bool, backendPoolID string) error {
return nil
}
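Exporting `AvailabilitySetEntry` (with `VMAS` and `ResourceGroup` fields) lets code outside the package type-assert entries pulled from the VMAS cache. A trimmed sketch of that access pattern; `availabilitySetStub` stands in for `compute.AvailabilitySet`, and the cache is modeled as a bare `sync.Map`:

```go
package main

import (
	"fmt"
	"sync"
)

// availabilitySetStub is a hypothetical stand-in for compute.AvailabilitySet.
type availabilitySetStub struct{ Name string }

// AvailabilitySetEntry matches the newly exported shape above.
type AvailabilitySetEntry struct {
	VMAS          *availabilitySetStub
	ResourceGroup string
}

func main() {
	var cache sync.Map
	cache.Store("vmas-1", &AvailabilitySetEntry{
		VMAS:          &availabilitySetStub{Name: "vmas-1"},
		ResourceGroup: "rg-nodes",
	})

	// Consumers can now range over the cache and use exported fields directly.
	cache.Range(func(_, value interface{}) bool {
		entry := value.(*AvailabilitySetEntry)
		fmt.Println(entry.VMAS.Name, entry.ResourceGroup)
		return true
	})
}
```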
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storage.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storage.go
index 1d00b965978d..436cd9859d01 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storage.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storage.go
@@ -20,7 +20,7 @@ import (
"context"
"fmt"
- "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage"
+ "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
"k8s.io/klog/v2"
@@ -40,6 +40,9 @@ func (az *Cloud) CreateFileShare(ctx context.Context, accountOptions *AccountOpt
if accountOptions.ResourceGroup == "" {
accountOptions.ResourceGroup = az.resourceGroup
}
+ if accountOptions.SubscriptionID == "" {
+ accountOptions.SubscriptionID = az.subscriptionID
+ }
accountOptions.EnableHTTPSTrafficOnly = true
if shareOptions.Protocol == storage.EnabledProtocolsNFS {
@@ -51,7 +54,7 @@ func (az *Cloud) CreateFileShare(ctx context.Context, accountOptions *AccountOpt
return "", "", fmt.Errorf("could not get storage key for storage account %s: %w", accountOptions.Name, err)
}
- if err := az.createFileShare(accountOptions.ResourceGroup, accountName, shareOptions); err != nil {
+ if err := az.createFileShare(ctx, accountOptions.SubscriptionID, accountOptions.ResourceGroup, accountName, shareOptions); err != nil {
return "", "", fmt.Errorf("failed to create share %s in account %s: %w", shareOptions.Name, accountName, err)
}
klog.V(4).Infof("created share %s in account %s", shareOptions.Name, accountOptions.Name)
@@ -59,8 +62,8 @@ func (az *Cloud) CreateFileShare(ctx context.Context, accountOptions *AccountOpt
}
// DeleteFileShare deletes a file share using storage account name and key
-func (az *Cloud) DeleteFileShare(resourceGroup, accountName, shareName string) error {
- if err := az.deleteFileShare(resourceGroup, accountName, shareName); err != nil {
+func (az *Cloud) DeleteFileShare(ctx context.Context, subsID, resourceGroup, accountName, shareName string) error {
+ if err := az.deleteFileShare(ctx, subsID, resourceGroup, accountName, shareName); err != nil {
return err
}
klog.V(4).Infof("share %s deleted", shareName)
@@ -68,11 +71,11 @@ func (az *Cloud) DeleteFileShare(resourceGroup, accountName, shareName string) e
}
// ResizeFileShare resizes a file share
-func (az *Cloud) ResizeFileShare(resourceGroup, accountName, name string, sizeGiB int) error {
- return az.resizeFileShare(resourceGroup, accountName, name, sizeGiB)
+func (az *Cloud) ResizeFileShare(ctx context.Context, subsID, resourceGroup, accountName, name string, sizeGiB int) error {
+ return az.resizeFileShare(ctx, subsID, resourceGroup, accountName, name, sizeGiB)
}
// GetFileShare gets a file share
-func (az *Cloud) GetFileShare(resourceGroupName, accountName, name string) (storage.FileShare, error) {
- return az.getFileShare(resourceGroupName, accountName, name)
+func (az *Cloud) GetFileShare(ctx context.Context, subsID, resourceGroupName, accountName, name string) (storage.FileShare, error) {
+ return az.getFileShare(ctx, subsID, resourceGroupName, accountName, name)
}
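
Every file-share entry point above now threads a context.Context and an explicit subscription ID. A self-contained sketch of the widened shape, with a stub standing in for the real Cloud type (all names below are assumptions for illustration):

package main

import (
	"context"
	"fmt"
)

// fileShareAPI mirrors only the post-patch method signatures.
type fileShareAPI interface {
	ResizeFileShare(ctx context.Context, subsID, resourceGroup, accountName, name string, sizeGiB int) error
	DeleteFileShare(ctx context.Context, subsID, resourceGroup, accountName, shareName string) error
}

type stubCloud struct{}

func (stubCloud) ResizeFileShare(_ context.Context, subsID, rg, account, name string, sizeGiB int) error {
	fmt.Printf("resize %s/%s/%s/%s to %d GiB\n", subsID, rg, account, name, sizeGiB)
	return nil
}

func (stubCloud) DeleteFileShare(_ context.Context, subsID, rg, account, share string) error {
	fmt.Printf("delete %s/%s/%s/%s\n", subsID, rg, account, share)
	return nil
}

func main() {
	var az fileShareAPI = stubCloud{}
	ctx := context.Background()
	_ = az.ResizeFileShare(ctx, "subs-id", "rg", "account", "share", 100)
	_ = az.DeleteFileShare(ctx, "subs-id", "rg", "account", "share")
}
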
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go
index cc04f3e744fe..bbde88ad9c1a 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go
@@ -21,10 +21,10 @@ import (
"fmt"
"strings"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-08-01/network"
"github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns"
- "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage"
+ "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
"github.com/Azure/go-autorest/autorest/to"
"k8s.io/klog/v2"
@@ -52,11 +52,18 @@ type AccountOptions struct {
IsHnsEnabled *bool
EnableNfsV3 *bool
AllowBlobPublicAccess *bool
+ RequireInfrastructureEncryption *bool
+ AllowSharedKeyAccess *bool
+ IsMultichannelEnabled *bool
+ KeyName *string
+ KeyVersion *string
+ KeyVaultURI *string
Tags map[string]string
VirtualNetworkResourceIDs []string
VNetResourceGroup string
VNetName string
SubnetName string
+ AccessTier string
MatchTags bool
}
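
The new AccountOptions fields are pointer-typed or zero-value-gated, so callers opt in per feature. A hedged sketch of populating them, using a local mirror of just the added fields (the real struct lives in the provider package and has many more fields):

package main

import "fmt"

// accountOptions mirrors only the fields added in the hunk above.
type accountOptions struct {
	RequireInfrastructureEncryption *bool
	AllowSharedKeyAccess            *bool
	IsMultichannelEnabled           *bool
	KeyName                         *string
	KeyVersion                      *string
	KeyVaultURI                     *string
	AccessTier                      string
}

func boolPtr(b bool) *bool    { return &b }
func strPtr(s string) *string { return &s }

func main() {
	opts := accountOptions{
		RequireInfrastructureEncryption: boolPtr(true),
		AllowSharedKeyAccess:            boolPtr(false),
		IsMultichannelEnabled:           boolPtr(true),
		KeyVaultURI:                     strPtr("https://example.vault.azure.net/"), // illustrative URI
		AccessTier:                      "Hot",                                      // empty string means "leave unset"
	}
	fmt.Println(*opts.RequireInfrastructureEncryption, opts.AccessTier)
}
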
@@ -198,15 +205,21 @@ func (az *Cloud) EnsureStorageAccount(ctx context.Context, accountOptions *Accou
}
if accountOptions.CreatePrivateEndpoint {
- // Create DNS zone first, this could make sure driver has write permission on vnetResourceGroup
- if err := az.createPrivateDNSZone(ctx, vnetResourceGroup); err != nil {
- return "", "", fmt.Errorf("Failed to create private DNS zone(%s) in resourceGroup(%s), error: %v", PrivateDNSZoneName, vnetResourceGroup, err)
+ if _, err := az.privatednsclient.Get(ctx, vnetResourceGroup, PrivateDNSZoneName); err != nil {
+ klog.V(2).Infof("get private dns zone %s returned with %v", PrivateDNSZoneName, err.Error())
+ // Create the DNS zone first; this verifies early that the driver has write permission on vnetResourceGroup
+ if err := az.createPrivateDNSZone(ctx, vnetResourceGroup); err != nil {
+ return "", "", fmt.Errorf("create private DNS zone(%s) in resourceGroup(%s): %w", PrivateDNSZoneName, vnetResourceGroup, err)
+ }
}
// Create virtual link to the private DNS zone
vNetLinkName := accountName + "-vnetlink"
- if err := az.createVNetLink(ctx, vNetLinkName, vnetResourceGroup, vnetName); err != nil {
- return "", "", fmt.Errorf("Failed to create virtual link for vnet(%s) and DNS Zone(%s) in resourceGroup(%s), error: %v", vnetName, PrivateDNSZoneName, vnetResourceGroup, err)
+ if _, err := az.virtualNetworkLinksClient.Get(ctx, vnetResourceGroup, PrivateDNSZoneName, vNetLinkName); err != nil {
+ klog.V(2).Infof("get virtual link for vnet(%s) and DNS Zone(%s) returned with %v", vnetName, PrivateDNSZoneName, err.Error())
+ if err := az.createVNetLink(ctx, vNetLinkName, vnetResourceGroup, vnetName); err != nil {
+ return "", "", fmt.Errorf("create virtual link for vnet(%s) and DNS Zone(%s) in resourceGroup(%s): %w", vnetName, PrivateDNSZoneName, vnetResourceGroup, err)
+ }
}
}
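
The hunk above makes private DNS zone and vnet-link creation idempotent: it looks the resource up first and only creates on a miss, so repeated reconciles no longer fail on resources that already exist. A self-contained sketch of that get-then-create shape (registry and names are stand-ins, not the Azure clients):

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

type registry map[string]bool

func (r registry) get(name string) error {
	if !r[name] {
		return errNotFound
	}
	return nil
}

func (r registry) create(name string) error {
	r[name] = true
	return nil
}

// ensure creates name only when the lookup misses, mirroring the
// get-then-create flow in the hunk above.
func ensure(r registry, name string) error {
	if err := r.get(name); err != nil {
		if err := r.create(name); err != nil {
			return fmt.Errorf("create %s: %w", name, err)
		}
	}
	return nil
}

func main() {
	zones := registry{}
	fmt.Println(ensure(zones, "zone"), ensure(zones, "zone")) // <nil> <nil>; second call is a no-op
}
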
@@ -273,6 +286,36 @@ func (az *Cloud) EnsureStorageAccount(ctx context.Context, accountOptions *Accou
klog.V(2).Infof("set AllowBlobPublicAccess(%v) for storage account(%s)", *accountOptions.AllowBlobPublicAccess, accountName)
cp.AccountPropertiesCreateParameters.AllowBlobPublicAccess = accountOptions.AllowBlobPublicAccess
}
+ if accountOptions.RequireInfrastructureEncryption != nil {
+ klog.V(2).Infof("set RequireInfrastructureEncryption(%v) for storage account(%s)", *accountOptions.RequireInfrastructureEncryption, accountName)
+ cp.AccountPropertiesCreateParameters.Encryption = &storage.Encryption{
+ RequireInfrastructureEncryption: accountOptions.RequireInfrastructureEncryption,
+ KeySource: storage.KeySourceMicrosoftStorage,
+ Services: &storage.EncryptionServices{
+ File: &storage.EncryptionService{Enabled: to.BoolPtr(true)},
+ Blob: &storage.EncryptionService{Enabled: to.BoolPtr(true)},
+ },
+ }
+ }
+ if accountOptions.AllowSharedKeyAccess != nil {
+ klog.V(2).Infof("set Allow SharedKeyAccess (%v) for storage account (%s)", *accountOptions.AllowSharedKeyAccess, accountName)
+ cp.AccountPropertiesCreateParameters.AllowSharedKeyAccess = accountOptions.AllowSharedKeyAccess
+ }
+ if accountOptions.KeyVaultURI != nil {
+ klog.V(2).Infof("set KeyVault(%v) for storage account(%s)", accountOptions.KeyVaultURI, accountName)
+ cp.AccountPropertiesCreateParameters.Encryption = &storage.Encryption{
+ KeyVaultProperties: &storage.KeyVaultProperties{
+ KeyName: accountOptions.KeyName,
+ KeyVersion: accountOptions.KeyVersion,
+ KeyVaultURI: accountOptions.KeyVaultURI,
+ },
+ KeySource: storage.KeySourceMicrosoftKeyvault,
+ Services: &storage.EncryptionServices{
+ File: &storage.EncryptionService{Enabled: to.BoolPtr(true)},
+ Blob: &storage.EncryptionService{Enabled: to.BoolPtr(true)},
+ },
+ }
+ }
if az.StorageAccountClient == nil {
return "", "", fmt.Errorf("StorageAccountClient is nil")
}
@@ -281,21 +324,34 @@ func (az *Cloud) EnsureStorageAccount(ctx context.Context, accountOptions *Accou
return "", "", fmt.Errorf("failed to create storage account %s, error: %v", accountName, rerr)
}
- if accountOptions.DisableFileServiceDeleteRetentionPolicy {
- klog.V(2).Infof("disable DisableFileServiceDeleteRetentionPolicy on account(%s), resource group(%s)", accountName, resourceGroup)
- prop, err := az.FileClient.GetServiceProperties(resourceGroup, accountName)
+ if accountOptions.DisableFileServiceDeleteRetentionPolicy || to.Bool(accountOptions.IsMultichannelEnabled) {
+ prop, err := az.FileClient.WithSubscriptionID(subsID).GetServiceProperties(ctx, resourceGroup, accountName)
if err != nil {
return "", "", err
}
if prop.FileServicePropertiesProperties == nil {
- return "", "", fmt.Errorf("FileServicePropertiesProperties of account(%s), resource group(%s) is nil", accountName, resourceGroup)
+ return "", "", fmt.Errorf("FileServicePropertiesProperties of account(%s), subscription(%s), resource group(%s) is nil", accountName, subsID, resourceGroup)
+ }
+ prop.FileServicePropertiesProperties.ProtocolSettings = nil
+ prop.FileServicePropertiesProperties.Cors = nil
+ if accountOptions.DisableFileServiceDeleteRetentionPolicy {
+ klog.V(2).Infof("disable FileServiceDeleteRetentionPolicy on account(%s), subscription(%s), resource group(%s)", accountName, subsID, resourceGroup)
+ prop.FileServicePropertiesProperties.ShareDeleteRetentionPolicy = &storage.DeleteRetentionPolicy{Enabled: to.BoolPtr(false)}
+ }
+ if to.Bool(accountOptions.IsMultichannelEnabled) {
+ klog.V(2).Infof("enable SMB Multichannel setting on account(%s), subscription(%s), resource group(%s)", accountName, subsID, resourceGroup)
+ prop.FileServicePropertiesProperties.ProtocolSettings = &storage.ProtocolSettings{Smb: &storage.SmbSetting{Multichannel: &storage.Multichannel{Enabled: to.BoolPtr(true)}}}
}
- prop.FileServicePropertiesProperties.ShareDeleteRetentionPolicy = &storage.DeleteRetentionPolicy{Enabled: to.BoolPtr(false)}
- if _, err := az.FileClient.SetServiceProperties(resourceGroup, accountName, prop); err != nil {
+ if _, err := az.FileClient.WithSubscriptionID(subsID).SetServiceProperties(ctx, resourceGroup, accountName, prop); err != nil {
return "", "", err
}
}
+ if accountOptions.AccessTier != "" {
+ klog.V(2).Infof("set AccessTier(%s) on account(%s), subscription(%s), resource group(%s)", accountOptions.AccessTier, accountName, subsID, resourceGroup)
+ cp.AccountPropertiesCreateParameters.AccessTier = storage.AccessTier(accountOptions.AccessTier)
+ }
+
if accountOptions.CreatePrivateEndpoint {
// Get properties of the storageAccount
storageAccount, err := az.StorageAccountClient.GetProperties(ctx, subsID, resourceGroup, accountName)
@@ -306,13 +362,13 @@ func (az *Cloud) EnsureStorageAccount(ctx context.Context, accountOptions *Accou
// Create private endpoint
privateEndpointName := accountName + "-pvtendpoint"
if err := az.createPrivateEndpoint(ctx, accountName, storageAccount.ID, privateEndpointName, vnetResourceGroup, vnetName, subnetName, location); err != nil {
- return "", "", fmt.Errorf("Failed to create private endpoint for storage account(%s), resourceGroup(%s), error: %v", accountName, vnetResourceGroup, err)
+ return "", "", fmt.Errorf("create private endpoint for storage account(%s), resourceGroup(%s): %w", accountName, vnetResourceGroup, err)
}
// Create dns zone group
dnsZoneGroupName := accountName + "-dnszonegroup"
if err := az.createPrivateDNSZoneGroup(ctx, dnsZoneGroupName, privateEndpointName, vnetResourceGroup, vnetName); err != nil {
- return "", "", fmt.Errorf("Failed to create private DNS zone group - privateEndpoint(%s), vNetName(%s), resourceGroup(%s), error: %v", privateEndpointName, vnetName, vnetResourceGroup, err)
+ return "", "", fmt.Errorf("create private DNS zone group - privateEndpoint(%s), vNetName(%s), resourceGroup(%s): %w", privateEndpointName, vnetName, vnetResourceGroup, err)
}
}
}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go
index 3a17ac890681..e3542f35c914 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go
@@ -31,6 +31,7 @@ import (
"k8s.io/klog/v2"
utilnet "k8s.io/utils/net"
+ azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
"sigs.k8s.io/cloud-provider-azure/pkg/consts"
)
@@ -261,22 +262,6 @@ func getNodePrivateIPAddresses(node *v1.Node) []string {
return addresses
}
-func isLBBackendPoolTypeIPConfig(service *v1.Service, lb *network.LoadBalancer, clusterName string) bool {
- if lb == nil || lb.LoadBalancerPropertiesFormat == nil || lb.BackendAddressPools == nil {
- klog.V(4).Infof("isLBBackendPoolTypeIPConfig: no backend pools in the LB %s", to.String(lb.Name))
- return false
- }
- lbBackendPoolName := getBackendPoolName(clusterName, service)
- for _, bp := range *lb.BackendAddressPools {
- if strings.EqualFold(to.String(bp.Name), lbBackendPoolName) {
- return bp.BackendAddressPoolPropertiesFormat != nil &&
- bp.BackendIPConfigurations != nil &&
- len(*bp.BackendIPConfigurations) != 0
- }
- }
- return false
-}
-
func getBoolValueFromServiceAnnotations(service *v1.Service, key string) bool {
if l, found := service.Annotations[key]; found {
return strings.EqualFold(strings.TrimSpace(l), consts.TrueAnnotationValue)
@@ -300,3 +285,56 @@ func sameContentInSlices(s1 []string, s2 []string) bool {
}
return true
}
+
+func removeDuplicatedSecurityRules(rules []network.SecurityRule) []network.SecurityRule {
+ ruleNames := make(map[string]bool)
+ for i := len(rules) - 1; i >= 0; i-- {
+ if _, ok := ruleNames[to.String(rules[i].Name)]; ok {
+ klog.Warningf("Found duplicated rule %s, will be removed.", to.String(rules[i].Name))
+ rules = append(rules[:i], rules[i+1:]...)
+ }
+ ruleNames[to.String(rules[i].Name)] = true
+ }
+ return rules
+}
+
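
For reference, the reverse iteration above keeps the last occurrence of each rule name, since earlier duplicates are the ones spliced out. A self-contained illustration of the same shape:

package main

import "fmt"

type rule struct{ Name string }

// dedup walks backwards so a name already seen (i.e. one that occurs later
// in the slice) marks the earlier element for removal.
func dedup(rules []rule) []rule {
	seen := make(map[string]bool)
	for i := len(rules) - 1; i >= 0; i-- {
		if seen[rules[i].Name] {
			rules = append(rules[:i], rules[i+1:]...)
			continue
		}
		seen[rules[i].Name] = true
	}
	return rules
}

func main() {
	fmt.Println(dedup([]rule{{"a"}, {"b"}, {"a"}})) // [{b} {a}]
}
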
+func getVMSSVMCacheKey(resourceGroup, vmssName string) string {
+ cacheKey := strings.ToLower(fmt.Sprintf("%s/%s", resourceGroup, vmssName))
+ return cacheKey
+}
+
+// isNodeInVMSSVMCache checks whether nodeName is in vmssVMCache
+func isNodeInVMSSVMCache(nodeName string, vmssVMCache *azcache.TimedCache) bool {
+ if vmssVMCache == nil {
+ return false
+ }
+
+ var isInCache bool
+
+ vmssVMCache.Lock.Lock()
+ defer vmssVMCache.Lock.Unlock()
+
+ for _, entry := range vmssVMCache.Store.List() {
+ if entry != nil {
+ e := entry.(*azcache.AzureCacheEntry)
+ e.Lock.Lock()
+ data := e.Data
+ if data != nil {
+ data.(*sync.Map).Range(func(vmName, _ interface{}) bool {
+ if vmName != nil && vmName.(string) == nodeName {
+ isInCache = true
+ return false
+ }
+ return true
+ })
+ }
+ e.Lock.Unlock()
+ }
+
+ if isInCache {
+ break
+ }
+ }
+
+ return isInCache
+}
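
The inner loop above relies on sync.Map's early-exit contract: returning false from the Range callback stops the scan. A minimal stand-alone version of that pattern:

package main

import (
	"fmt"
	"sync"
)

// contains stops ranging as soon as the key is found.
func contains(m *sync.Map, want string) bool {
	found := false
	m.Range(func(k, _ interface{}) bool {
		if k.(string) == want {
			found = true
			return false // stop ranging
		}
		return true
	})
	return found
}

func main() {
	var m sync.Map
	m.Store("node-1", struct{}{})
	fmt.Println(contains(&m, "node-1")) // true
}
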
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go
index 9ebe27533804..d829587960b6 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go
@@ -19,7 +19,7 @@ package provider
import (
"context"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-08-01/network"
"github.com/Azure/go-autorest/autorest/azure"
@@ -71,7 +71,7 @@ type VMSet interface {
// participating in the specified LoadBalancer Backend Pool.
EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string) (string, string, string, *compute.VirtualMachineScaleSetVM, error)
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
- EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error
+ EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) (bool, error)
//EnsureBackendPoolDeletedFromVMSets ensures the loadBalancer backendAddressPools deleted from the specified VMSS/VMAS
EnsureBackendPoolDeletedFromVMSets(vmSetNamesMap map[string]bool, backendPoolID string) error
@@ -80,7 +80,7 @@ type VMSet interface {
// DetachDisk detaches a disk from vm
DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string) error
// WaitForUpdateResult waits for the response of the update request
- WaitForUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName, source string) error
+ WaitForUpdateResult(ctx context.Context, future *azure.Future, nodeName types.NodeName, source string) error
// GetDataDisks gets a list of data disks attached to the node.
GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]compute.DataDisk, *string, error)
@@ -105,4 +105,7 @@ type VMSet interface {
// GetAgentPoolVMSetNames returns all vmSet names according to the nodes
GetAgentPoolVMSetNames(nodes []*v1.Node) (*[]string, error)
+
+ // DeleteCacheForNode removes the node entry from cache.
+ DeleteCacheForNode(nodeName string) error
}
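
EnsureBackendPoolDeleted now returns (bool, error); judging by the first hunk in this section, which returns nicUpdated, the bool appears to report whether any NIC was actually modified. A hedged caller sketch against a mirror of the changed shape (all names assumed):

package main

import "fmt"

// poolDeleter mirrors just the changed method shape from the interface above.
type poolDeleter interface {
	EnsureBackendPoolDeleted(backendPoolID, vmSetName string) (bool, error)
}

type fakeVMSet struct{ updated bool }

func (f fakeVMSet) EnsureBackendPoolDeleted(string, string) (bool, error) {
	return f.updated, nil
}

func main() {
	var d poolDeleter = fakeVMSet{updated: true}
	nicUpdated, err := d.EnsureBackendPoolDeleted("pool-id", "vmss-1")
	if err != nil {
		panic(err)
	}
	if nicUpdated {
		fmt.Println("NICs changed; invalidate dependent caches") // follow-up only on a real change
	}
}
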
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go
index f4e26f6e5693..94b2201577af 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go
@@ -17,6 +17,7 @@ limitations under the License.
package provider
import (
+ "context"
"errors"
"fmt"
"regexp"
@@ -24,7 +25,7 @@ import (
"strings"
"sync"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-08-01/network"
"github.com/Azure/go-autorest/autorest/to"
@@ -44,12 +45,15 @@ import (
var (
// ErrorNotVmssInstance indicates an instance is not belonging to any vmss.
ErrorNotVmssInstance = errors.New("not a vmss instance")
-
- scaleSetNameRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines(?:.*)`)
- resourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(?:.*)/virtualMachines(?:.*)`)
- vmssIPConfigurationRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines/(.+)/networkInterfaces(?:.*)`)
- vmssPIPConfigurationRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines/(.+)/networkInterfaces/(.+)/ipConfigurations/(.+)/publicIPAddresses/(.+)`)
- vmssVMProviderIDRE = regexp.MustCompile(`azure:///subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines/(?:\d+)`)
+ ErrScaleSetNotFound = errors.New("scale set not found")
+
+ scaleSetNameRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines(?:.*)`)
+ resourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(?:.*)/virtualMachines(?:.*)`)
+ vmssIPConfigurationRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines/(.+)/networkInterfaces(?:.*)`)
+ vmssPIPConfigurationRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines/(.+)/networkInterfaces/(.+)/ipConfigurations/(.+)/publicIPAddresses/(.+)`)
+ vmssVMResourceIDTemplate = `/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines/(?:\d+)`
+ vmssVMResourceIDRE = regexp.MustCompile(vmssVMResourceIDTemplate)
+ vmssVMProviderIDRE = regexp.MustCompile(fmt.Sprintf("%s%s", "azure://", vmssVMResourceIDTemplate))
)
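
The refactor above derives the provider-ID regex from the bare resource-ID template plus an "azure://" prefix, so both forms of a VMSS VM ID share one pattern. A runnable check (the IDs are illustrative):

package main

import (
	"fmt"
	"regexp"
)

var (
	vmssVMResourceIDTemplate = `/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines/(?:\d+)`
	vmssVMResourceIDRE       = regexp.MustCompile(vmssVMResourceIDTemplate)
	vmssVMProviderIDRE       = regexp.MustCompile(fmt.Sprintf("%s%s", "azure://", vmssVMResourceIDTemplate))
)

func main() {
	id := "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/vmss/virtualMachines/0"
	fmt.Println(vmssVMResourceIDRE.FindStringSubmatch(id)[1:])              // [rg vmss]
	fmt.Println(vmssVMProviderIDRE.FindStringSubmatch("azure://" + id)[1:]) // [rg vmss]
}
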
// vmssMetaInfo contains the metadata for a VMSS.
@@ -74,15 +78,34 @@ type ScaleSet struct {
// this also allows for clusters with both VM and VMSS nodes.
availabilitySet VMSet
- vmssCache *azcache.TimedCache
- vmssVMCache *sync.Map // [resourcegroup/vmssname]*azcache.TimedCache
- availabilitySetNodesCache *azcache.TimedCache
+ // flexScaleSet is required for self-hosted K8s clusters (for example, capz).
+ // It is also used when there are vmss flex nodes and other types of nodes in
+ // the same cluster.
+ flexScaleSet VMSet
+
+ // vmssCache is a timed cache where the Store in the cache is a map of
+ // Key: consts.VMSSKey
+ // Value: sync.Map of [vmssName]*VMSSEntry
+ vmssCache *azcache.TimedCache
+
+ // vmssVMCache is a timed cache where the Store in the cache is a map of
+ // Key: [resourcegroup/vmssName]
+ // Value: sync.Map of [vmName]*VMSSVirtualMachineEntry
+ vmssVMCache *azcache.TimedCache
+
+ // nonVmssUniformNodesCache is used to store node names from non-uniform VMs.
+ // Currently, the nodes can come from avset, vmss flex, or individual vm.
+ // This cache contains an entry called nonVmssUniformNodesEntry.
+ // nonVmssUniformNodesEntry contains the avSetVMNodeNames list, the vmssFlexVMNodeNames list,
+ // and the current clusterNodeNames.
+ nonVmssUniformNodesCache *azcache.TimedCache
+
// lockMap in cache refresh
lockMap *lockMap
}
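
The comments above describe a two-level layout: a timed cache whose store maps a fixed key to a sync.Map of per-VMSS entries. A minimal stand-in with the azcache types replaced by a plain map, for illustration only:

package main

import (
	"fmt"
	"sync"
)

// vmssEntry stands in for the cached *VMSSEntry value described above.
type vmssEntry struct{ name string }

func main() {
	// outer store: one fixed key mapping to a sync.Map of per-VMSS entries
	store := map[string]*sync.Map{}
	inner := &sync.Map{}
	inner.Store("vmss-1", &vmssEntry{name: "vmss-1"})
	store["k8svmssKey"] = inner

	if m, ok := store["k8svmssKey"]; ok {
		if e, ok := m.Load("vmss-1"); ok {
			fmt.Println(e.(*vmssEntry).name) // vmss-1
		}
	}
}
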
// newScaleSet creates a new ScaleSet.
-func newScaleSet(az *Cloud) (VMSet, error) {
+func newScaleSet(ctx context.Context, az *Cloud) (VMSet, error) {
if az.Config.VmssVirtualMachinesCacheTTLInSeconds == 0 {
az.Config.VmssVirtualMachinesCacheTTLInSeconds = consts.VMSSVirtualMachinesCacheTTLDefaultInSeconds
}
@@ -92,21 +115,31 @@ func newScaleSet(az *Cloud) (VMSet, error) {
if err != nil {
return nil, err
}
+ fs, err := newFlexScaleSet(ctx, az)
+ if err != nil {
+ return nil, err
+ }
+
ss := &ScaleSet{
Cloud: az,
availabilitySet: as,
- vmssVMCache: &sync.Map{},
+ flexScaleSet: fs,
lockMap: newLockMap(),
}
- if !ss.DisableAvailabilitySetNodes {
- ss.availabilitySetNodesCache, err = ss.newAvailabilitySetNodesCache()
+ if !ss.DisableAvailabilitySetNodes || ss.EnableVmssFlexNodes {
+ ss.nonVmssUniformNodesCache, err = ss.newNonVmssUniformNodesCache()
if err != nil {
return nil, err
}
}
- ss.vmssCache, err = ss.newVMSSCache()
+ ss.vmssCache, err = ss.newVMSSCache(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ ss.vmssVMCache, err = ss.newVMSSVirtualMachinesCache()
if err != nil {
return nil, err
}
@@ -120,11 +153,10 @@ func (ss *ScaleSet) getVMSS(vmssName string, crt azcache.AzureCacheReadType) (*c
if err != nil {
return nil, err
}
-
vmsses := cached.(*sync.Map)
if vmss, ok := vmsses.Load(vmssName); ok {
- result := vmss.(*vmssEntry)
- return result.vmss, nil
+ result := vmss.(*VMSSEntry)
+ return result.VMSS, nil
}
return nil, nil
@@ -154,55 +186,50 @@ func (ss *ScaleSet) getVMSS(vmssName string, crt azcache.AzureCacheReadType) (*c
// getVmssVMByNodeIdentity find virtualMachineScaleSetVM by nodeIdentity, using node's parent VMSS cache.
// Returns cloudprovider.InstanceNotFound if the node does not belong to the scale set named in nodeIdentity.
func (ss *ScaleSet) getVmssVMByNodeIdentity(node *nodeIdentity, crt azcache.AzureCacheReadType) (*virtualmachine.VirtualMachine, error) {
- cacheKey, cache, err := ss.getVMSSVMCache(node.resourceGroup, node.vmssName)
+ // FIXME(ccc): check only if vmss is uniform.
+ _, err := getScaleSetVMInstanceID(node.nodeName)
if err != nil {
return nil, err
}
- getter := func(nodeName string, crt azcache.AzureCacheReadType) (*virtualmachine.VirtualMachine, bool, error) {
+ getter := func(crt azcache.AzureCacheReadType) (*virtualmachine.VirtualMachine, bool, error) {
var found bool
- cached, err := cache.Get(cacheKey, crt)
+ virtualMachines, err := ss.getVMSSVMsFromCache(node.resourceGroup, node.vmssName, crt)
if err != nil {
return nil, found, err
}
- virtualMachines := cached.(*sync.Map)
- if entry, ok := virtualMachines.Load(nodeName); ok {
- result := entry.(*vmssVirtualMachinesEntry)
- if result.virtualMachine == nil {
- klog.Warningf("failed to get VM with vmssVirtualMachinesEntry on Node %q", nodeName)
- return nil, false, nil
+ if entry, ok := virtualMachines.Load(node.nodeName); ok {
+ result := entry.(*VMSSVirtualMachineEntry)
+ if result.VirtualMachine == nil {
+ klog.Warningf("VM is nil on Node %q, VM is in deleting state", node.nodeName)
+ return nil, true, nil
}
found = true
- return virtualmachine.FromVirtualMachineScaleSetVM(result.virtualMachine, virtualmachine.ByVMSS(result.vmssName)), found, nil
+ return virtualmachine.FromVirtualMachineScaleSetVM(result.VirtualMachine, virtualmachine.ByVMSS(result.VMSSName)), found, nil
}
return nil, found, nil
}
- // FIXME(ccc): check only if vmss is uniform.
- _, err = getScaleSetVMInstanceID(node.nodeName)
- if err != nil {
- return nil, err
- }
-
- vm, found, err := getter(node.nodeName, crt)
+ vm, found, err := getter(crt)
if err != nil {
return nil, err
}
if !found {
+ cacheKey := getVMSSVMCacheKey(node.resourceGroup, node.vmssName)
// lock and try find nodeName from cache again, refresh cache if still not found
ss.lockMap.LockEntry(cacheKey)
defer ss.lockMap.UnlockEntry(cacheKey)
- vm, found, err = getter(node.nodeName, crt)
+ vm, found, err = getter(crt)
if err == nil && found && vm != nil {
klog.V(2).Infof("found VMSS VM with nodeName %s after retry", node.nodeName)
return vm, nil
}
klog.V(2).Infof("Couldn't find VMSS VM with nodeName %s, refreshing the cache(vmss: %s, rg: %s)", node.nodeName, node.vmssName, node.resourceGroup)
- vm, found, err = getter(node.nodeName, azcache.CacheReadTypeForceRefresh)
+ vm, found, err = getter(azcache.CacheReadTypeForceRefresh)
if err != nil {
return nil, err
}
@@ -232,16 +259,21 @@ func (ss *ScaleSet) getVmssVM(nodeName string, crt azcache.AzureCacheReadType) (
// GetPowerStatusByNodeName returns the power state of the specified node.
func (ss *ScaleSet) GetPowerStatusByNodeName(name string) (powerState string, err error) {
- managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, azcache.CacheReadTypeUnsafe)
+ vmManagementType, err := ss.getVMManagementTypeByNodeName(name, azcache.CacheReadTypeUnsafe)
if err != nil {
- klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
+ klog.Errorf("Failed to check VM management type: %v", err)
return "", err
}
- if managedByAS {
+
+ if vmManagementType == ManagedByAvSet {
// vm is managed by availability set.
return ss.availabilitySet.GetPowerStatusByNodeName(name)
}
-
+ if vmManagementType == ManagedByVmssFlex {
+ // vm is managed by vmss flex.
+ return ss.flexScaleSet.GetPowerStatusByNodeName(name)
+ }
+ // VM is managed by vmss
vm, err := ss.getVmssVM(name, azcache.CacheReadTypeDefault)
if err != nil {
return powerState, err
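
The same three-way routing recurs in GetProvisioningStateByNodeName, GetInstanceIDByNodeName, and the other per-node methods below: resolve the management type once, then delegate. A self-contained sketch of that dispatch (the constant names come from the patch; everything else is illustrative):

package main

import "fmt"

type vmManagementType int

const (
	ManagedByVmssUniform vmManagementType = iota
	ManagedByAvSet
	ManagedByVmssFlex
)

// route mirrors the per-method dispatch: one lookup, then delegation to
// the matching backend implementation.
func route(t vmManagementType) string {
	switch t {
	case ManagedByAvSet:
		return "availabilitySet"
	case ManagedByVmssFlex:
		return "flexScaleSet"
	default:
		return "vmss uniform (handled locally)"
	}
}

func main() {
	fmt.Println(route(ManagedByVmssFlex)) // flexScaleSet
}
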
@@ -267,15 +299,20 @@ func (ss *ScaleSet) GetPowerStatusByNodeName(name string) (powerState string, er
// GetProvisioningStateByNodeName returns the provisioningState for the specified node.
func (ss *ScaleSet) GetProvisioningStateByNodeName(name string) (provisioningState string, err error) {
- managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, azcache.CacheReadTypeUnsafe)
+ vmManagementType, err := ss.getVMManagementTypeByNodeName(name, azcache.CacheReadTypeUnsafe)
if err != nil {
- klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
+ klog.Errorf("Failed to check VM management type: %v", err)
return "", err
}
- if managedByAS {
+
+ if vmManagementType == ManagedByAvSet {
// vm is managed by availability set.
return ss.availabilitySet.GetProvisioningStateByNodeName(name)
}
+ if vmManagementType == ManagedByVmssFlex {
+ // vm is managed by vmss flex.
+ return ss.flexScaleSet.GetProvisioningStateByNodeName(name)
+ }
vm, err := ss.getVmssVM(name, azcache.CacheReadTypeDefault)
if err != nil {
@@ -292,24 +329,18 @@ func (ss *ScaleSet) GetProvisioningStateByNodeName(name string) (provisioningSta
// getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache.
// The node must belong to one of scale sets.
func (ss *ScaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID string, crt azcache.AzureCacheReadType) (*compute.VirtualMachineScaleSetVM, error) {
- cacheKey, cache, err := ss.getVMSSVMCache(resourceGroup, scaleSetName)
- if err != nil {
- return nil, err
- }
-
getter := func(crt azcache.AzureCacheReadType) (vm *compute.VirtualMachineScaleSetVM, found bool, err error) {
- cached, err := cache.Get(cacheKey, crt)
+ virtualMachines, err := ss.getVMSSVMsFromCache(resourceGroup, scaleSetName, crt)
if err != nil {
return nil, false, err
}
- virtualMachines := cached.(*sync.Map)
virtualMachines.Range(func(key, value interface{}) bool {
- vmEntry := value.(*vmssVirtualMachinesEntry)
- if strings.EqualFold(vmEntry.resourceGroup, resourceGroup) &&
- strings.EqualFold(vmEntry.vmssName, scaleSetName) &&
- strings.EqualFold(vmEntry.instanceID, instanceID) {
- vm = vmEntry.virtualMachine
+ vmEntry := value.(*VMSSVirtualMachineEntry)
+ if strings.EqualFold(vmEntry.ResourceGroup, resourceGroup) &&
+ strings.EqualFold(vmEntry.VMSSName, scaleSetName) &&
+ strings.EqualFold(vmEntry.InstanceID, instanceID) {
+ vm = vmEntry.VirtualMachine
found = true
return false
}
@@ -352,26 +383,37 @@ func (ss *ScaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceI
// It must return ("", cloudprovider.InstanceNotFound) if the instance does
// not exist or is no longer running.
func (ss *ScaleSet) GetInstanceIDByNodeName(name string) (string, error) {
- managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, azcache.CacheReadTypeUnsafe)
+ vmManagementType, err := ss.getVMManagementTypeByNodeName(name, azcache.CacheReadTypeUnsafe)
if err != nil {
- klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
+ klog.Errorf("Failed to check VM management type: %v", err)
return "", err
}
- if managedByAS {
+
+ if vmManagementType == ManagedByAvSet {
// vm is managed by availability set.
return ss.availabilitySet.GetInstanceIDByNodeName(name)
}
+ if vmManagementType == ManagedByVmssFlex {
+ // vm is managed by vmss flex.
+ return ss.flexScaleSet.GetInstanceIDByNodeName(name)
+ }
vm, err := ss.getVmssVM(name, azcache.CacheReadTypeUnsafe)
if err != nil {
+ // special case: during scaling in, if the vm is deleted and nonVmssUniformNodesCache is refreshed,
+ // then getVMManagementTypeByNodeName will return ManagedByVmssUniform no matter what the actual managementType is.
+ // In this case, if it is actually a non vmss uniform node, return InstanceNotFound
+ if errors.Is(err, ErrorNotVmssInstance) {
+ return "", cloudprovider.InstanceNotFound
+ }
klog.Errorf("Unable to find node %s: %v", name, err)
return "", err
}
resourceID := vm.ID
- convertedResourceID, err := convertResourceGroupNameToLower(resourceID)
+ convertedResourceID, err := ConvertResourceGroupNameToLower(resourceID)
if err != nil {
- klog.Errorf("convertResourceGroupNameToLower failed with error: %v", err)
+ klog.Errorf("ConvertResourceGroupNameToLower failed with error: %v", err)
return "", err
}
return convertedResourceID, nil
@@ -379,18 +421,32 @@ func (ss *ScaleSet) GetInstanceIDByNodeName(name string) (string, error) {
// GetNodeNameByProviderID gets the node name by provider ID.
// providerID example:
-// 1. vmas providerID: azure:///subscriptions/subsid/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-27053986-0
-// 2. vmss providerID:
-// azure:///subscriptions/subsid/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-22126781-vmss/virtualMachines/1
-// /subscriptions/subsid/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-22126781-vmss/virtualMachines/k8s-agentpool-36841236-vmss_1
+// 1. vmas providerID: azure:///subscriptions/subsid/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-27053986-0
+// 2. vmss providerID:
+// azure:///subscriptions/subsid/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-22126781-vmss/virtualMachines/1
+// /subscriptions/subsid/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-22126781-vmss/virtualMachines/k8s-agentpool-36841236-vmss_1
func (ss *ScaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
- // NodeName is not part of providerID for vmss instances.
- scaleSetName, err := extractScaleSetNameByProviderID(providerID)
+
+ vmManagementType, err := ss.getVMManagementTypeByProviderID(providerID, azcache.CacheReadTypeUnsafe)
if err != nil {
- klog.V(4).Infof("Can not extract scale set name from providerID (%s), assuming it is managed by availability set: %v", providerID, err)
+ klog.Errorf("Failed to check VM management type: %v", err)
+ return "", err
+ }
+
+ if vmManagementType == ManagedByAvSet {
+ // vm is managed by availability set.
return ss.availabilitySet.GetNodeNameByProviderID(providerID)
}
+ if vmManagementType == ManagedByVmssFlex {
+ // vm is managed by vmss flex.
+ return ss.flexScaleSet.GetNodeNameByProviderID(providerID)
+ }
+ // NodeName is not part of providerID for vmss instances.
+ scaleSetName, err := extractScaleSetNameByProviderID(providerID)
+ if err != nil {
+ return "", fmt.Errorf("error of extracting vmss name for node %q", providerID)
+ }
resourceGroup, err := extractResourceGroupByProviderID(providerID)
if err != nil {
return "", fmt.Errorf("error of extracting resource group for node %q", providerID)
@@ -426,15 +482,20 @@ func (ss *ScaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName,
// GetInstanceTypeByNodeName gets the instance type by node name.
func (ss *ScaleSet) GetInstanceTypeByNodeName(name string) (string, error) {
- managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, azcache.CacheReadTypeUnsafe)
+ vmManagementType, err := ss.getVMManagementTypeByNodeName(name, azcache.CacheReadTypeUnsafe)
if err != nil {
- klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
+ klog.Errorf("Failed to check VM management type: %v", err)
return "", err
}
- if managedByAS {
+
+ if vmManagementType == ManagedByAvSet {
// vm is managed by availability set.
return ss.availabilitySet.GetInstanceTypeByNodeName(name)
}
+ if vmManagementType == ManagedByVmssFlex {
+ // vm is managed by vmss flex.
+ return ss.flexScaleSet.GetInstanceTypeByNodeName(name)
+ }
vm, err := ss.getVmssVM(name, azcache.CacheReadTypeUnsafe)
if err != nil {
@@ -454,15 +515,20 @@ func (ss *ScaleSet) GetInstanceTypeByNodeName(name string) (string, error) {
// GetZoneByNodeName gets availability zone for the specified node. If the node is not running
// with availability zone, then it returns fault domain.
func (ss *ScaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
- managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, azcache.CacheReadTypeUnsafe)
+ vmManagementType, err := ss.getVMManagementTypeByNodeName(name, azcache.CacheReadTypeUnsafe)
if err != nil {
- klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
+ klog.Errorf("Failed to check VM management type: %v", err)
return cloudprovider.Zone{}, err
}
- if managedByAS {
+
+ if vmManagementType == ManagedByAvSet {
// vm is managed by availability set.
return ss.availabilitySet.GetZoneByNodeName(name)
}
+ if vmManagementType == ManagedByVmssFlex {
+ // vm is managed by vmss flex.
+ return ss.flexScaleSet.GetZoneByNodeName(name)
+ }
vm, err := ss.getVmssVM(name, azcache.CacheReadTypeUnsafe)
if err != nil {
@@ -487,7 +553,7 @@ func (ss *ScaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
} else {
err = fmt.Errorf("failed to get zone info")
klog.Errorf("GetZoneByNodeName: got unexpected error %v", err)
- _ = ss.deleteCacheForNode(name)
+ _ = ss.DeleteCacheForNode(name)
return cloudprovider.Zone{}, err
}
@@ -505,6 +571,21 @@ func (ss *ScaleSet) GetPrimaryVMSetName() string {
// GetIPByNodeName gets machine private IP and public IP by node name.
func (ss *ScaleSet) GetIPByNodeName(nodeName string) (string, string, error) {
+ vmManagementType, err := ss.getVMManagementTypeByNodeName(nodeName, azcache.CacheReadTypeUnsafe)
+ if err != nil {
+ klog.Errorf("Failed to check VM management type: %v", err)
+ return "", "", err
+ }
+
+ if vmManagementType == ManagedByAvSet {
+ // vm is managed by availability set.
+ return ss.availabilitySet.GetIPByNodeName(nodeName)
+ }
+ if vmManagementType == ManagedByVmssFlex {
+ // vm is managed by vmss flex.
+ return ss.flexScaleSet.GetIPByNodeName(nodeName)
+ }
+
nic, err := ss.GetPrimaryInterface(nodeName)
if err != nil {
klog.Errorf("error: ss.GetIPByNodeName(%s), GetPrimaryInterface(%q), err=%v", nodeName, nodeName, err)
@@ -568,6 +649,21 @@ func (ss *ScaleSet) getVMSSPublicIPAddress(resourceGroupName string, virtualMach
// allowing users to split ipv4/v6 on multiple nics
func (ss *ScaleSet) GetPrivateIPsByNodeName(nodeName string) ([]string, error) {
ips := make([]string, 0)
+ vmManagementType, err := ss.getVMManagementTypeByNodeName(nodeName, azcache.CacheReadTypeUnsafe)
+ if err != nil {
+ klog.Errorf("Failed to check VM management type: %v", err)
+ return ips, err
+ }
+
+ if vmManagementType == ManagedByAvSet {
+ // vm is managed by availability set.
+ return ss.availabilitySet.GetPrivateIPsByNodeName(nodeName)
+ }
+ if vmManagementType == ManagedByVmssFlex {
+ // vm is managed by vmss flex.
+ return ss.flexScaleSet.GetPrivateIPsByNodeName(nodeName)
+ }
+
nic, err := ss.GetPrimaryInterface(nodeName)
if err != nil {
klog.Errorf("error: ss.GetIPByNodeName(%s), GetPrimaryInterface(%q), err=%v", nodeName, nodeName, err)
@@ -698,21 +794,21 @@ func (ss *ScaleSet) getNodeIdentityByNodeName(nodeName string, crt azcache.Azure
vmsses := cached.(*sync.Map)
vmsses.Range(func(key, value interface{}) bool {
- v := value.(*vmssEntry)
- if v.vmss.Name == nil {
+ v := value.(*VMSSEntry)
+ if v.VMSS.Name == nil {
return true
}
- vmssPrefix := *v.vmss.Name
- if v.vmss.VirtualMachineProfile != nil &&
- v.vmss.VirtualMachineProfile.OsProfile != nil &&
- v.vmss.VirtualMachineProfile.OsProfile.ComputerNamePrefix != nil {
- vmssPrefix = *v.vmss.VirtualMachineProfile.OsProfile.ComputerNamePrefix
+ vmssPrefix := *v.VMSS.Name
+ if v.VMSS.VirtualMachineProfile != nil &&
+ v.VMSS.VirtualMachineProfile.OsProfile != nil &&
+ v.VMSS.VirtualMachineProfile.OsProfile.ComputerNamePrefix != nil {
+ vmssPrefix = *v.VMSS.VirtualMachineProfile.OsProfile.ComputerNamePrefix
}
if strings.EqualFold(vmssPrefix, nodeName[:len(nodeName)-6]) {
- node.vmssName = *v.vmss.Name
- node.resourceGroup = v.resourceGroup
+ node.vmssName = *v.VMSS.Name
+ node.resourceGroup = v.ResourceGroup
return false
}
@@ -811,9 +907,9 @@ func (ss *ScaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (*[]str
return scaleSetNames, nil
}
- scaleSetNames, err := ss.getAgentPoolScaleSets(nodes)
+ scaleSetNames, err := ss.GetAgentPoolVMSetNames(nodes)
if err != nil {
- klog.Errorf("ss.GetVMSetNames - getAgentPoolScaleSets failed err=(%v)", err)
+ klog.Errorf("ss.GetVMSetNames - GetAgentPoolVMSetNames failed err=(%v)", err)
return nil, err
}
if len(*scaleSetNames) == 0 {
@@ -832,7 +928,7 @@ func (ss *ScaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (*[]str
}
if !found {
klog.Errorf("ss.GetVMSetNames - scale set (%s) in service annotation not found", serviceVMSetName)
- return nil, fmt.Errorf("scale set (%s) - not found", serviceVMSetName)
+ return nil, ErrScaleSetNotFound
}
return &[]string{serviceVMSetName}, nil
}
@@ -852,15 +948,20 @@ func extractResourceGroupByVMSSNicID(nicID string) (string, error) {
// GetPrimaryInterface gets machine primary network interface by node name and vmSet.
func (ss *ScaleSet) GetPrimaryInterface(nodeName string) (network.Interface, error) {
- managedByAS, err := ss.isNodeManagedByAvailabilitySet(nodeName, azcache.CacheReadTypeDefault)
+ vmManagementType, err := ss.getVMManagementTypeByNodeName(nodeName, azcache.CacheReadTypeUnsafe)
if err != nil {
- klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
+ klog.Errorf("Failed to check VM management type: %v", err)
return network.Interface{}, err
}
- if managedByAS {
+
+ if vmManagementType == ManagedByAvSet {
// vm is managed by availability set.
return ss.availabilitySet.GetPrimaryInterface(nodeName)
}
+ if vmManagementType == ManagedByVmssFlex {
+ // vm is managed by vmss flex.
+ return ss.flexScaleSet.GetPrimaryInterface(nodeName)
+ }
vm, err := ss.getVmssVM(nodeName, azcache.CacheReadTypeDefault)
if err != nil {
@@ -932,7 +1033,7 @@ func (ss *ScaleSet) getPrimaryNetworkInterfaceConfiguration(networkConfiguration
}
// getPrimaryNetworkInterfaceConfigurationForScaleSet gets primary network interface configuration for scale set.
-func (ss *ScaleSet) getPrimaryNetworkInterfaceConfigurationForScaleSet(networkConfigurations []compute.VirtualMachineScaleSetNetworkConfiguration, vmssName string) (*compute.VirtualMachineScaleSetNetworkConfiguration, error) {
+func getPrimaryNetworkInterfaceConfigurationForScaleSet(networkConfigurations []compute.VirtualMachineScaleSetNetworkConfiguration, vmssName string) (*compute.VirtualMachineScaleSetNetworkConfiguration, error) {
if len(networkConfigurations) == 1 {
return &networkConfigurations[0], nil
}
@@ -963,14 +1064,14 @@ func getPrimaryIPConfigFromVMSSNetworkConfig(config *compute.VirtualMachineScale
return nil, fmt.Errorf("failed to find a primary IP configuration")
}
-func (ss *ScaleSet) getConfigForScaleSetByIPFamily(config *compute.VirtualMachineScaleSetNetworkConfiguration, nodeName string, IPv6 bool) (*compute.VirtualMachineScaleSetIPConfiguration, error) {
+func getConfigForScaleSetByIPFamily(config *compute.VirtualMachineScaleSetNetworkConfiguration, nodeName string, IPv6 bool) (*compute.VirtualMachineScaleSetIPConfiguration, error) {
ipConfigurations := *config.IPConfigurations
var ipVersion compute.IPVersion
if IPv6 {
- ipVersion = compute.IPVersionIPv6
+ ipVersion = compute.IPv6
} else {
- ipVersion = compute.IPVersionIPv4
+ ipVersion = compute.IPv4
}
for idx := range ipConfigurations {
ipConfig := &ipConfigurations[idx]
@@ -994,7 +1095,9 @@ func (ss *ScaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam
}
klog.Errorf("EnsureHostInPool: failed to get VMSS VM %s: %v", vmName, err)
- return "", "", "", nil, err
+ if !errors.Is(err, ErrorNotVmssInstance) {
+ return "", "", "", nil, err
+ }
}
klog.V(2).Infof("ensuring node %q of scaleset %q in LB backendpool %q", nodeName, vm.VMSSName, backendPoolID)
@@ -1052,7 +1155,7 @@ func (ss *ScaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam
// For IPv6 or dualstack service, we need to pick the right IP configuration based on the cluster ip family
// IPv6 configuration is only supported as non-primary, so we need to fetch the ip configuration where the
// privateIPAddressVersion matches the clusterIP family
- primaryIPConfiguration, err = ss.getConfigForScaleSetByIPFamily(primaryNetworkInterfaceConfiguration, vmName, ipv6)
+ primaryIPConfiguration, err = getConfigForScaleSetByIPFamily(primaryNetworkInterfaceConfiguration, vmName, ipv6)
if err != nil {
return "", "", "", nil, err
}
@@ -1130,6 +1233,14 @@ func getVmssAndResourceGroupNameByVMProviderID(providerID string) (string, strin
return matches[1], matches[2], nil
}
+func getVmssAndResourceGroupNameByVMID(id string) (string, string, error) {
+ matches := vmssVMResourceIDRE.FindStringSubmatch(id)
+ if len(matches) != 3 {
+ return "", "", ErrorNotVmssInstance
+ }
+ return matches[1], matches[2], nil
+}
+
func (ss *ScaleSet) ensureVMSSInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetNameOfLB string) error {
klog.V(2).Infof("ensureVMSSInPool: ensuring VMSS with backendPoolID %s", backendPoolID)
vmssNamesMap := make(map[string]bool)
@@ -1153,10 +1264,25 @@ func (ss *ScaleSet) ensureVMSSInPool(service *v1.Service, nodes []*v1.Node, back
}
// in this scenario the vmSetName is an empty string and the name of vmss should be obtained from the provider IDs of nodes
- resourceGroupName, vmssName, err := getVmssAndResourceGroupNameByVMProviderID(node.Spec.ProviderID)
- if err != nil {
- klog.V(4).Infof("ensureVMSSInPool: found VMAS node %s, will skip checking and continue", node.Name)
- continue
+ var resourceGroupName, vmssName string
+ if node.Spec.ProviderID != "" {
+ resourceGroupName, vmssName, err = getVmssAndResourceGroupNameByVMProviderID(node.Spec.ProviderID)
+ if err != nil {
+ klog.V(4).Infof("ensureVMSSInPool: the provider ID %s of node %s is not the format of VMSS VM, will skip checking and continue", node.Spec.ProviderID, node.Name)
+ continue
+ }
+ } else {
+ klog.V(4).Infof("ensureVMSSInPool: the provider ID of node %s is empty, will check the VM ID", node.Name)
+ instanceID, err := ss.InstanceID(context.TODO(), types.NodeName(node.Name))
+ if err != nil {
+ klog.Errorf("ensureVMSSInPool: Failed to get instance ID for node %q: %v", node.Name, err)
+ return err
+ }
+ resourceGroupName, vmssName, err = getVmssAndResourceGroupNameByVMID(instanceID)
+ if err != nil {
+ klog.V(4).Infof("ensureVMSSInPool: the instance ID %s of node %s is not the format of VMSS VM, will skip checking and continue", node.Spec.ProviderID, node.Name)
+ continue
+ }
}
// only vmsses in the resource group same as it's in azure config are included
if strings.EqualFold(resourceGroupName, ss.ResourceGroup) {
@@ -1186,7 +1312,7 @@ func (ss *ScaleSet) ensureVMSSInPool(service *v1.Service, nodes []*v1.Node, back
continue
}
vmssNIC := *vmss.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations
- primaryNIC, err := ss.getPrimaryNetworkInterfaceConfigurationForScaleSet(vmssNIC, vmssName)
+ primaryNIC, err := getPrimaryNetworkInterfaceConfigurationForScaleSet(vmssNIC, vmssName)
if err != nil {
return err
}
@@ -1200,7 +1326,7 @@ func (ss *ScaleSet) ensureVMSSInPool(service *v1.Service, nodes []*v1.Node, back
return err
}
} else {
- primaryIPConfig, err = ss.getConfigForScaleSetByIPFamily(primaryNIC, "", ipv6)
+ primaryIPConfig, err = getConfigForScaleSetByIPFamily(primaryNIC, "", ipv6)
if err != nil {
return err
}
@@ -1270,9 +1396,7 @@ func (ss *ScaleSet) ensureVMSSInPool(service *v1.Service, nodes []*v1.Node, back
return nil
}
-// EnsureHostsInPool ensures the given Node's primary IP configurations are
-// participating in the specified LoadBalancer Backend Pool.
-func (ss *ScaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetNameOfLB string) error {
+func (ss *ScaleSet) ensureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetNameOfLB string) error {
mc := metrics.NewMetricContext("services", "vmss_ensure_hosts_in_pool", ss.ResourceGroup, ss.SubscriptionID, getServiceName(service))
isOperationSucceeded := false
defer func() {
@@ -1300,28 +1424,6 @@ func (ss *ScaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac
continue
}
- // Check whether the node is VMAS virtual machine.
- managedByAS, err := ss.isNodeManagedByAvailabilitySet(localNodeName, azcache.CacheReadTypeDefault)
- if err != nil {
- klog.Errorf("Failed to check isNodeManagedByAvailabilitySet(%s): %v", localNodeName, err)
- errors = append(errors, err)
- continue
- }
-
- if managedByAS {
- // VMAS nodes should also be added to the SLB backends.
- if ss.useStandardLoadBalancer() {
- hostUpdates = append(hostUpdates, func() error {
- _, _, _, _, err := ss.availabilitySet.EnsureHostInPool(service, types.NodeName(localNodeName), backendPoolID, vmSetNameOfLB)
- return err
- })
- continue
- }
-
- klog.V(3).Infof("EnsureHostsInPool skips node %s because VMAS nodes couldn't be added to basic LB with VMSS backends", localNodeName)
- continue
- }
-
nodeResourceGroup, nodeVMSS, nodeInstanceID, nodeVMSSVM, err := ss.EnsureHostInPool(service, types.NodeName(localNodeName), backendPoolID, vmSetNameOfLB)
if err != nil {
klog.Errorf("EnsureHostInPool(%s): backendPoolID(%s) - failed to ensure host in pool: %q", getServiceName(service), backendPoolID, err)
@@ -1345,7 +1447,7 @@ func (ss *ScaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac
// Invalidate the cache since the VMSS VM would be updated.
defer func() {
- _ = ss.deleteCacheForNode(localNodeName)
+ _ = ss.DeleteCacheForNode(localNodeName)
}()
}
@@ -1357,10 +1459,24 @@ func (ss *ScaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac
hostUpdates = append(hostUpdates, func() error {
ctx, cancel := getContextWithCancel()
defer cancel()
- klog.V(2).Infof("EnsureHostInPool begins to UpdateVMs for VMSS(%s, %s) with new backendPoolID %s", meta.resourceGroup, meta.vmssName, backendPoolID)
- rerr := ss.VirtualMachineScaleSetVMsClient.UpdateVMs(ctx, meta.resourceGroup, meta.vmssName, update, "network_update", ss.getPutVMSSVMBatchSize())
+
+ logFields := []interface{}{
+ "operation", "EnsureHostsInPool UpdateVMSSVMs",
+ "vmssName", meta.vmssName,
+ "resourceGroup", meta.resourceGroup,
+ "backendPoolID", backendPoolID,
+ }
+
+ batchSize, err := ss.VMSSBatchSize(meta.vmssName)
+ if err != nil {
+ klog.ErrorS(err, "Failed to get vmss batch size", logFields...)
+ return err
+ }
+
+ klog.V(2).InfoS("Begin to update VMs for VMSS with new backendPoolID", logFields...)
+ rerr := ss.VirtualMachineScaleSetVMsClient.UpdateVMs(ctx, meta.resourceGroup, meta.vmssName, update, "network_update", batchSize)
if rerr != nil {
- klog.Errorf("EnsureHostInPool UpdateVMs for VMSS(%s, %s) failed with error %v", meta.resourceGroup, meta.vmssName, rerr.Error())
+ klog.ErrorS(err, "Failed to update VMs for VMSS", logFields...)
return rerr.Error()
}
@@ -1388,6 +1504,83 @@ func (ss *ScaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac
return nil
}
+// EnsureHostsInPool ensures the given Node's primary IP configurations are
+// participating in the specified LoadBalancer Backend Pool.
+func (ss *ScaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetNameOfLB string) error {
+ if ss.DisableAvailabilitySetNodes && !ss.EnableVmssFlexNodes {
+ return ss.ensureHostsInPool(service, nodes, backendPoolID, vmSetNameOfLB)
+ }
+ vmssUniformNodes := make([]*v1.Node, 0)
+ vmssFlexNodes := make([]*v1.Node, 0)
+ vmasNodes := make([]*v1.Node, 0)
+ errors := make([]error, 0)
+ for _, node := range nodes {
+ localNodeName := node.Name
+
+ if ss.useStandardLoadBalancer() && ss.excludeMasterNodesFromStandardLB() && isControlPlaneNode(node) {
+ klog.V(4).Infof("Excluding master node %q from load balancer backendpool %q", localNodeName, backendPoolID)
+ continue
+ }
+
+ shouldExcludeLoadBalancer, err := ss.ShouldNodeExcludedFromLoadBalancer(localNodeName)
+ if err != nil {
+ klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", localNodeName, err)
+ return err
+ }
+ if shouldExcludeLoadBalancer {
+ klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName)
+ continue
+ }
+
+ vmManagementType, err := ss.getVMManagementTypeByNodeName(localNodeName, azcache.CacheReadTypeDefault)
+ if err != nil {
+ klog.Errorf("Failed to check vmManagementType(%s): %v", localNodeName, err)
+ errors = append(errors, err)
+ continue
+ }
+
+ if vmManagementType == ManagedByAvSet {
+ // vm is managed by availability set.
+ // VMAS nodes should also be added to the SLB backends.
+ if ss.useStandardLoadBalancer() {
+ vmasNodes = append(vmasNodes, node)
+ continue
+ }
+ klog.V(3).Infof("EnsureHostsInPool skips node %s because VMAS nodes couldn't be added to basic LB with VMSS backends", localNodeName)
+ continue
+ }
+ if vmManagementType == ManagedByVmssFlex {
+ // vm is managed by vmss flex.
+ if ss.useStandardLoadBalancer() {
+ vmssFlexNodes = append(vmssFlexNodes, node)
+ continue
+ }
+ klog.V(3).Infof("EnsureHostsInPool skips node %s because VMSS Flex nodes deos not support Basic Load Balancer", localNodeName)
+ continue
+ }
+ vmssUniformNodes = append(vmssUniformNodes, node)
+ }
+
+ if len(vmssFlexNodes) > 0 {
+ vmssFlexError := ss.flexScaleSet.EnsureHostsInPool(service, vmssFlexNodes, backendPoolID, vmSetNameOfLB)
+ errors = append(errors, vmssFlexError)
+ }
+
+ if len(vmasNodes) > 0 {
+ vmasError := ss.availabilitySet.EnsureHostsInPool(service, vmasNodes, backendPoolID, vmSetNameOfLB)
+ errors = append(errors, vmasError)
+ }
+
+ if len(vmssUniformNodes) > 0 {
+ vmssUniformError := ss.ensureHostsInPool(service, vmssUniformNodes, backendPoolID, vmSetNameOfLB)
+ errors = append(errors, vmssUniformError)
+ }
+
+ allErrors := utilerrors.Flatten(utilerrors.NewAggregate(errors))
+
+ return allErrors
+}
+
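
The wrapper above partitions nodes into uniform, flex, and VMAS groups, runs each group, and folds the per-group errors together so one failing group does not hide the others. A self-contained sketch of that aggregation (assumes k8s.io/apimachinery is available in go.mod):

package main

import (
	"errors"
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	errs := []error{
		nil, // successful groups may contribute nil; NewAggregate drops them
		errors.New("vmss flex: update failed"),
		errors.New("vmas: update failed"),
	}
	agg := utilerrors.Flatten(utilerrors.NewAggregate(errs))
	fmt.Println(agg) // both messages are preserved in one aggregate error
}
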
// ensureBackendPoolDeletedFromNode ensures the loadBalancer backendAddressPools deleted
// from the specified node, which returns (resourceGroup, vmasName, instanceID, vmssVM, error).
func (ss *ScaleSet) ensureBackendPoolDeletedFromNode(nodeName, backendPoolID string) (string, string, string, *compute.VirtualMachineScaleSetVM, error) {
@@ -1463,15 +1656,24 @@ func (ss *ScaleSet) ensureBackendPoolDeletedFromNode(nodeName, backendPoolID str
// GetNodeNameByIPConfigurationID gets the node name and the VMSS name by IP configuration ID.
func (ss *ScaleSet) GetNodeNameByIPConfigurationID(ipConfigurationID string) (string, string, error) {
+ vmManagementType, err := ss.getVMManagementTypeByIPConfigurationID(ipConfigurationID, azcache.CacheReadTypeUnsafe)
+ if err != nil {
+ klog.Errorf("Failed to check VM management type: %v", err)
+ return "", "", err
+ }
+
+ if vmManagementType == ManagedByAvSet {
+ // vm is managed by availability set.
+ return ss.availabilitySet.GetNodeNameByIPConfigurationID(ipConfigurationID)
+ }
+ if vmManagementType == ManagedByVmssFlex {
+ // vm is managed by vmss flex.
+ return ss.flexScaleSet.GetNodeNameByIPConfigurationID(ipConfigurationID)
+ }
+
matches := vmssIPConfigurationRE.FindStringSubmatch(ipConfigurationID)
if len(matches) != 4 {
- klog.V(4).Infof("Can not extract scale set name from ipConfigurationID (%s), assuming it is managed by availability set", ipConfigurationID)
- name, rg, err := ss.availabilitySet.GetNodeNameByIPConfigurationID(ipConfigurationID)
- if err != nil && !errors.Is(err, cloudprovider.InstanceNotFound) {
- klog.Errorf("Unable to find node by IPConfigurationID %s: %v", ipConfigurationID, err)
- return "", "", ErrorNotVmssInstance
- }
- return name, rg, nil
+ return "", "", fmt.Errorf("can not extract scale set name from ipConfigurationID (%s)", ipConfigurationID)
}
resourceGroup := matches[1]
@@ -1493,7 +1695,7 @@ func (ss *ScaleSet) GetNodeNameByIPConfigurationID(ipConfigurationID string) (st
func getScaleSetAndResourceGroupNameByIPConfigurationID(ipConfigurationID string) (string, string, error) {
matches := vmssIPConfigurationRE.FindStringSubmatch(ipConfigurationID)
if len(matches) != 4 {
- klog.V(4).Infof("Can not extract scale set name from ipConfigurationID (%s), assuming it is managed by availability set", ipConfigurationID)
+ klog.V(4).Infof("Can not extract scale set name from ipConfigurationID (%s), assuming it is managed by availability set or vmss flex", ipConfigurationID)
return "", "", ErrorNotVmssInstance
}
@@ -1502,22 +1704,133 @@ func getScaleSetAndResourceGroupNameByIPConfigurationID(ipConfigurationID string
return scaleSetName, resourceGroup, nil
}
-func (ss *ScaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backendPoolID, vmSetName string, ipConfigurationIDs []string) error {
- vmssNamesMap := make(map[string]bool)
+func (ss *ScaleSet) ensureBackendPoolDeletedFromVMSS(backendPoolID, vmSetName string) error {
+ if !ss.useStandardLoadBalancer() {
+ found := false
+
+ cachedUniform, err := ss.vmssCache.Get(consts.VMSSKey, azcache.CacheReadTypeDefault)
+ if err != nil {
+ klog.Errorf("ensureBackendPoolDeletedFromVMSS: failed to get vmss uniform from cache: %v", err)
+ return err
+ }
+ vmssUniformMap := cachedUniform.(*sync.Map)
+
+ vmssUniformMap.Range(func(key, value interface{}) bool {
+ vmssEntry := value.(*VMSSEntry)
+ if to.String(vmssEntry.VMSS.Name) == vmSetName {
+ found = true
+ return false
+ }
+ return true
+ })
+ if found {
+ return ss.ensureBackendPoolDeletedFromVmssUniform(backendPoolID, vmSetName)
+ }
+ flexScaleSet := ss.flexScaleSet.(*FlexScaleSet)
+ cachedFlex, err := flexScaleSet.vmssFlexCache.Get(consts.VmssFlexKey, azcache.CacheReadTypeDefault)
+ if err != nil {
+ klog.Errorf("ensureBackendPoolDeletedFromVMSS: failed to get vmss flex from cache: %v", err)
+ return err
+ }
+ vmssFlexMap := cachedFlex.(*sync.Map)
+ vmssFlexMap.Range(func(key, value interface{}) bool {
+ vmssFlex := value.(*compute.VirtualMachineScaleSet)
+ if to.String(vmssFlex.Name) == vmSetName {
+ found = true
+ return false
+ }
+ return true
+ })
+
+ if found {
+ return flexScaleSet.ensureBackendPoolDeletedFromVmssFlex(backendPoolID, vmSetName)
+ }
+
+ return cloudprovider.InstanceNotFound
+
+ }
+
+ err := ss.ensureBackendPoolDeletedFromVmssUniform(backendPoolID, vmSetName)
+ if err != nil {
+ return err
+ }
+ if ss.EnableVmssFlexNodes {
+ flexScaleSet := ss.flexScaleSet.(*FlexScaleSet)
+ err = flexScaleSet.ensureBackendPoolDeletedFromVmssFlex(backendPoolID, vmSetName)
+ }
+ return err
+}
+
+func (ss *ScaleSet) ensureBackendPoolDeletedFromVmssUniform(backendPoolID, vmSetName string) error {
+ klog.V(2).Infof("ensureBackendPoolDeletedFromVmssUniform: vmSetName (%s), backendPoolID (%s)", vmSetName, backendPoolID)
+
+ vmssNamesMap := make(map[string]bool)
// the standard load balancer supports multiple vmss in its backend while the basic sku doesn't
- if ss.useStandardLoadBalancer() {
- for _, ipConfigurationID := range ipConfigurationIDs {
- // in this scenario the vmSetName is an empty string and the name of vmss should be obtained from the provider IDs of nodes
- vmssName, resourceGroupName, err := getScaleSetAndResourceGroupNameByIPConfigurationID(ipConfigurationID)
+ if ss.useStandardLoadBalancer() && !ss.EnableMultipleStandardLoadBalancers {
+ cachedUniform, err := ss.vmssCache.Get(consts.VMSSKey, azcache.CacheReadTypeDefault)
+ if err != nil {
+ klog.Errorf("ensureBackendPoolDeletedFromVMSS: failed to get vmss uniform from cache: %v", err)
+ return err
+ }
+
+ vmssUniformMap := cachedUniform.(*sync.Map)
+ var errorList []error
+ walk := func(key, value interface{}) bool {
+ var vmss *compute.VirtualMachineScaleSet
+ if vmssEntry, ok := value.(*VMSSEntry); ok {
+ vmss = vmssEntry.VMSS
+ } else if v, ok := value.(*compute.VirtualMachineScaleSet); ok {
+ vmss = v
+ }
+ klog.V(2).Infof("ensureBackendPoolDeletedFromVmssUniform: vmss (%s)", to.String(vmss.Name))
+
+ // When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error.
+ // Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it.
+ if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.VirtualMachineScaleSetsDeallocating) {
+ klog.V(3).Infof("ensureBackendPoolDeletedFromVMSS: found vmss %s being deleted, skipping", to.String(vmss.Name))
+ return true
+ }
+ if vmss.VirtualMachineProfile == nil {
+ klog.V(4).Infof("ensureBackendPoolDeletedFromVMSS: vmss %s has no VirtualMachineProfile, skipping", to.String(vmss.Name))
+ return true
+ }
+ if vmss.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations == nil {
+ klog.V(4).Infof("ensureBackendPoolDeletedFromVMSS: cannot obtain the primary network interface configuration, of vmss %s", to.String(vmss.Name))
+ return true
+ }
+ vmssNIC := *vmss.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations
+ primaryNIC, err := getPrimaryNetworkInterfaceConfigurationForScaleSet(vmssNIC, to.String(vmss.Name))
if err != nil {
- klog.V(4).Infof("ensureBackendPoolDeletedFromVMSS: found VMAS ipcConfigurationID %s, will skip checking and continue", ipConfigurationID)
- continue
+ klog.Errorf("ensureBackendPoolDeletedFromVMSS: failed to get the primary network interface config of the VMSS %s: %v", to.String(vmss.Name), err)
+ errorList = append(errorList, err)
+ return true
}
- // only vmsses in the resource group same as it's in azure config are included
- if strings.EqualFold(resourceGroupName, ss.ResourceGroup) {
- vmssNamesMap[vmssName] = true
+ primaryIPConfig, err := getPrimaryIPConfigFromVMSSNetworkConfig(primaryNIC)
+ if err != nil {
+ klog.Errorf("ensureBackendPoolDeletedFromVMSS: failed to the primary IP config from the VMSS %s's network config : %v", to.String(vmss.Name), err)
+ errorList = append(errorList, err)
+ return true
+ }
+ loadBalancerBackendAddressPools := make([]compute.SubResource, 0)
+ if primaryIPConfig.LoadBalancerBackendAddressPools != nil {
+ loadBalancerBackendAddressPools = *primaryIPConfig.LoadBalancerBackendAddressPools
}
+ for _, loadBalancerBackendAddressPool := range loadBalancerBackendAddressPools {
+ klog.V(4).Infof("ensureBackendPoolDeletedFromVMSS: loadBalancerBackendAddressPool (%s) on vmss (%s)", to.String(loadBalancerBackendAddressPool.ID), to.String(vmss.Name))
+ if strings.EqualFold(to.String(loadBalancerBackendAddressPool.ID), backendPoolID) {
+ klog.V(4).Infof("ensureBackendPoolDeletedFromVMSS: found vmss %s with backend pool %s, removing it", to.String(vmss.Name), backendPoolID)
+ vmssNamesMap[to.String(vmss.Name)] = true
+ }
+ }
+
+ return true
+ }
+
+ // Walk through all cached vmss, and find the vmss that contains the backendPoolID.
+ vmssUniformMap.Range(walk)
+ if len(errorList) > 0 {
+ return utilerrors.Flatten(utilerrors.NewAggregate(errorList))
}
} else {
vmssNamesMap[vmSetName] = true
@@ -1526,11 +1839,11 @@ func (ss *ScaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen
return ss.EnsureBackendPoolDeletedFromVMSets(vmssNamesMap, backendPoolID)
}
-// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
-func (ss *ScaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error {
+// ensureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
+func (ss *ScaleSet) ensureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) (bool, error) {
// Returns nil if backend address pools already deleted.
if backendAddressPools == nil {
- return nil
+ return false, nil
}
mc := metrics.NewMetricContext("services", "vmss_ensure_backend_pool_deleted", ss.ResourceGroup, ss.SubscriptionID, getServiceName(service))
@@ -1552,6 +1865,7 @@ func (ss *ScaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID,
}
}
+ // Ensure the backendPoolID is deleted from the VMSS VMs.
hostUpdates := make([]func() error, 0, len(ipConfigurationIDs))
nodeUpdates := make(map[vmssMetaInfo]map[string]compute.VirtualMachineScaleSetVM)
allErrs := make([]error, 0)
@@ -1574,7 +1888,7 @@ func (ss *ScaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID,
}
if errors.Is(err, cloudprovider.InstanceNotFound) {
- klog.Infof("EnsureBackendPoolDeleted(%s): skipping ip config %s because the corresponding vmss vm is not"+
+ klog.Infof("ensureBackendPoolDeleted(%s): skipping ip config %s because the corresponding vmss vm is not"+
" found", getServiceName(service), ipConfigurationID)
continue
}
@@ -1587,7 +1901,7 @@ func (ss *ScaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID,
nodeResourceGroup, nodeVMSS, nodeInstanceID, nodeVMSSVM, err := ss.ensureBackendPoolDeletedFromNode(nodeName, backendPoolID)
if err != nil {
if !errors.Is(err, ErrorNotVmssInstance) { // Do nothing for the VMAS nodes.
- klog.Errorf("EnsureBackendPoolDeleted(%s): backendPoolID(%s) - failed with error %v", getServiceName(service), backendPoolID, err)
+ klog.Errorf("ensureBackendPoolDeleted(%s): backendPoolID(%s) - failed with error %v", getServiceName(service), backendPoolID, err)
allErrs = append(allErrs, err)
}
continue
@@ -1609,11 +1923,12 @@ func (ss *ScaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID,
// Invalidate the cache since the VMSS VM would be updated.
defer func() {
- _ = ss.deleteCacheForNode(nodeName)
+ _ = ss.DeleteCacheForNode(nodeName)
}()
}
// Update VMs with best effort that have already been added to nodeUpdates.
+ var updatedVM bool
for meta, update := range nodeUpdates {
// create new instance of meta and update for passing to anonymous function
meta := meta
@@ -1621,40 +1936,167 @@ func (ss *ScaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID,
hostUpdates = append(hostUpdates, func() error {
ctx, cancel := getContextWithCancel()
defer cancel()
- klog.V(2).Infof("EnsureBackendPoolDeleted begins to UpdateVMs for VMSS(%s, %s) with backendPoolID %s", meta.resourceGroup, meta.vmssName, backendPoolID)
- rerr := ss.VirtualMachineScaleSetVMsClient.UpdateVMs(ctx, meta.resourceGroup, meta.vmssName, update, "network_update", ss.getPutVMSSVMBatchSize())
+
+ logFields := []interface{}{
+ "operation", "EnsureBackendPoolDeleted UpdateVMSSVMs",
+ "vmssName", meta.vmssName,
+ "resourceGroup", meta.resourceGroup,
+ "backendPoolID", backendPoolID,
+ }
+
+ batchSize, err := ss.VMSSBatchSize(meta.vmssName)
+ if err != nil {
+ klog.ErrorS(err, "Failed to get vmss batch size", logFields...)
+ return err
+ }
+
+ klog.V(2).InfoS("Begin to update VMs for VMSS with new backendPoolID", logFields...)
+ rerr := ss.VirtualMachineScaleSetVMsClient.UpdateVMs(ctx, meta.resourceGroup, meta.vmssName, update, "network_update", batchSize)
if rerr != nil {
- klog.Errorf("EnsureBackendPoolDeleted UpdateVMs for VMSS(%s, %s) failed with error %v", meta.resourceGroup, meta.vmssName, rerr.Error())
+ klog.ErrorS(err, "Failed to update VMs for VMSS", logFields...)
return rerr.Error()
}
+ updatedVM = true
return nil
})
}
errs := utilerrors.AggregateGoroutines(hostUpdates...)
if errs != nil {
- return utilerrors.Flatten(errs)
+ return updatedVM, utilerrors.Flatten(errs)
}
// Fail if there are other errors.
if len(allErrs) > 0 {
- return utilerrors.Flatten(utilerrors.NewAggregate(allErrs))
+ return updatedVM, utilerrors.Flatten(utilerrors.NewAggregate(allErrs))
}
- // Ensure the backendPoolID is also deleted on VMSS itself.
+ isOperationSucceeded = true
+ return updatedVM, nil
+}
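
The hostUpdates closures above are executed through utilerrors.AggregateGoroutines, which runs each func() error in its own goroutine and aggregates the failures; a nil aggregate means every batched VM update succeeded. A self-contained sketch of that pattern:

```go
package main

import (
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	updates := []func() error{
		func() error { return nil },                              // succeeds
		func() error { return fmt.Errorf("vmss-1: throttled") },  // fails
	}

	// Each function runs in its own goroutine; the aggregate is nil
	// only if every update succeeded.
	if errs := utilerrors.AggregateGoroutines(updates...); errs != nil {
		fmt.Println(utilerrors.Flatten(errs))
	}
}
```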
+
+// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
+func (ss *ScaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) (bool, error) {
+ if backendAddressPools == nil {
+ return false, nil
+ }
+ vmssUniformBackendIPConfigurations := []network.InterfaceIPConfiguration{}
+ vmssFlexBackendIPConfigurations := []network.InterfaceIPConfiguration{}
+ avSetBackendIPConfigurations := []network.InterfaceIPConfiguration{}
+
+ for _, backendPool := range *backendAddressPools {
+ if strings.EqualFold(*backendPool.ID, backendPoolID) && backendPool.BackendIPConfigurations != nil {
+ for _, ipConf := range *backendPool.BackendIPConfigurations {
+ if ipConf.ID == nil {
+ continue
+ }
+
+ vmManagementType, err := ss.getVMManagementTypeByIPConfigurationID(*ipConf.ID, azcache.CacheReadTypeUnsafe)
+ if err != nil {
+ klog.Warningf("Failed to check VM management type by ipConfigurationID %s: %v, skip it", *ipConf.ID, err)
+ }
+
+ if vmManagementType == ManagedByAvSet {
+ // vm is managed by availability set.
+ avSetBackendIPConfigurations = append(avSetBackendIPConfigurations, ipConf)
+ }
+ if vmManagementType == ManagedByVmssFlex {
+ // vm is managed by vmss flex.
+ vmssFlexBackendIPConfigurations = append(vmssFlexBackendIPConfigurations, ipConf)
+ }
+ if vmManagementType == ManagedByVmssUniform {
+ // vm is managed by vmss uniform.
+ vmssUniformBackendIPConfigurations = append(vmssUniformBackendIPConfigurations, ipConf)
+ }
+ }
+ }
+ }
+
+ // make sure all vmss, both uniform and flex, are decoupled from
+ // the lb backend pool even if there are no ipConfigs in the backend pool.
if deleteFromVMSet {
- err := ss.ensureBackendPoolDeletedFromVMSS(service, backendPoolID, vmSetName, ipConfigurationIDs)
+ err := ss.ensureBackendPoolDeletedFromVMSS(backendPoolID, vmSetName)
if err != nil {
- return err
+ return false, err
}
}
- isOperationSucceeded = true
- return nil
+ var updated bool
+ if len(vmssUniformBackendIPConfigurations) > 0 {
+ vmssUniformBackendPools := &[]network.BackendAddressPool{
+ {
+ ID: to.StringPtr(backendPoolID),
+ BackendAddressPoolPropertiesFormat: &network.BackendAddressPoolPropertiesFormat{
+ BackendIPConfigurations: &vmssUniformBackendIPConfigurations,
+ },
+ },
+ }
+ updatedVM, err := ss.ensureBackendPoolDeleted(service, backendPoolID, vmSetName, vmssUniformBackendPools)
+ if err != nil {
+ return false, err
+ }
+ if updatedVM {
+ updated = true
+ }
+ }
+
+ if len(vmssFlexBackendIPConfigurations) > 0 {
+ vmssFlexBackendPools := &[]network.BackendAddressPool{
+ {
+ ID: to.StringPtr(backendPoolID),
+ BackendAddressPoolPropertiesFormat: &network.BackendAddressPoolPropertiesFormat{
+ BackendIPConfigurations: &vmssFlexBackendIPConfigurations,
+ },
+ },
+ }
+ updatedNIC, err := ss.flexScaleSet.EnsureBackendPoolDeleted(service, backendPoolID, vmSetName, vmssFlexBackendPools, false)
+ if err != nil {
+ return false, err
+ }
+ if updatedNIC {
+ updated = true
+ }
+ }
+
+ if len(avSetBackendIPConfigurations) > 0 {
+ avSetBackendPools := &[]network.BackendAddressPool{
+ {
+ ID: to.StringPtr(backendPoolID),
+ BackendAddressPoolPropertiesFormat: &network.BackendAddressPoolPropertiesFormat{
+ BackendIPConfigurations: &avSetBackendIPConfigurations,
+ },
+ },
+ }
+ updatedNIC, err := ss.availabilitySet.EnsureBackendPoolDeleted(service, backendPoolID, vmSetName, avSetBackendPools, false)
+ if err != nil {
+ return false, err
+ }
+ if updatedNIC {
+ updated = true
+ }
+ }
+
+ return updated, nil
+
}
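
EnsureBackendPoolDeleted now fans each backend IP configuration out to the VMSet implementation that owns it. A compact sketch of the bucketing step, with a hypothetical classify helper standing in for getVMManagementTypeByIPConfigurationID:

```go
package main

import "fmt"

type managementType string

const (
	byUniform managementType = "ManagedByVmssUniform"
	byFlex    managementType = "ManagedByVmssFlex"
	byAvSet   managementType = "ManagedByAvSet"
)

func main() {
	classify := func(ipConfigID string) managementType {
		// Hypothetical: the real code consults cached node/provider-ID sets.
		switch ipConfigID {
		case "cfg-flex":
			return byFlex
		case "cfg-avset":
			return byAvSet
		default:
			return byUniform
		}
	}

	buckets := map[managementType][]string{}
	for _, id := range []string{"cfg-uniform", "cfg-flex", "cfg-avset"} {
		buckets[classify(id)] = append(buckets[classify(id)], id)
	}
	// Each non-empty bucket is then handed to the matching VMSet
	// implementation (uniform, flex, or availability set).
	fmt.Println(buckets)
}
```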
// GetNodeCIDRMaskByProviderID returns the node CIDR subnet mask by provider ID.
func (ss *ScaleSet) GetNodeCIDRMasksByProviderID(providerID string) (int, int, error) {
+ vmManagementType, err := ss.getVMManagementTypeByProviderID(providerID, azcache.CacheReadTypeUnsafe)
+ if err != nil {
+ klog.Errorf("Failed to check VM management type: %v", err)
+ return 0, 0, err
+ }
+
+ if vmManagementType == ManagedByAvSet {
+ // vm is managed by availability set.
+ return ss.availabilitySet.GetNodeCIDRMasksByProviderID(providerID)
+ }
+ if vmManagementType == ManagedByVmssFlex {
+ // vm is managed by vmss flex.
+ return ss.flexScaleSet.GetNodeCIDRMasksByProviderID(providerID)
+ }
+
_, vmssName, err := getVmssAndResourceGroupNameByVMProviderID(providerID)
if err != nil {
return 0, 0, err
@@ -1682,7 +2124,7 @@ func (ss *ScaleSet) GetNodeCIDRMasksByProviderID(providerID string) (int, int, e
return ipv4Mask, ipv6Mask, nil
}
-//EnsureBackendPoolDeletedFromVMSets ensures the loadBalancer backendAddressPools deleted from the specified VMSS
+// EnsureBackendPoolDeletedFromVMSets ensures the loadBalancer backendAddressPools deleted from the specified VMSS
func (ss *ScaleSet) EnsureBackendPoolDeletedFromVMSets(vmssNamesMap map[string]bool, backendPoolID string) error {
vmssUpdaters := make([]func() error, 0, len(vmssNamesMap))
errors := make([]error, 0, len(vmssNamesMap))
@@ -1698,23 +2140,23 @@ func (ss *ScaleSet) EnsureBackendPoolDeletedFromVMSets(vmssNamesMap map[string]b
// When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error.
// Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it.
if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.VirtualMachineScaleSetsDeallocating) {
- klog.V(3).Infof("ensureVMSSInPool: found vmss %s being deleted, skipping", vmssName)
+ klog.V(3).Infof("EnsureBackendPoolDeletedFromVMSets: found vmss %s being deleted, skipping", vmssName)
continue
}
if vmss.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations == nil {
- klog.V(4).Infof("EnsureHostInPool: cannot obtain the primary network interface configuration, of vmss %s", vmssName)
+ klog.V(4).Infof("EnsureBackendPoolDeletedFromVMSets: cannot obtain the primary network interface configuration, of vmss %s", vmssName)
continue
}
vmssNIC := *vmss.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations
- primaryNIC, err := ss.getPrimaryNetworkInterfaceConfigurationForScaleSet(vmssNIC, vmssName)
+ primaryNIC, err := getPrimaryNetworkInterfaceConfigurationForScaleSet(vmssNIC, vmssName)
if err != nil {
- klog.Errorf("ensureBackendPoolDeletedFromVMSS: failed to get the primary network interface config of the VMSS %s: %v", vmssName, err)
+ klog.Errorf("EnsureBackendPoolDeletedFromVMSets: failed to get the primary network interface config of the VMSS %s: %v", vmssName, err)
errors = append(errors, err)
continue
}
primaryIPConfig, err := getPrimaryIPConfigFromVMSSNetworkConfig(primaryNIC)
if err != nil {
- klog.Errorf("ensureBackendPoolDeletedFromVMSS: failed to the primary IP config from the VMSS %s's network config : %v", vmssName, err)
+ klog.Errorf("EnsureBackendPoolDeletedFromVMSets: failed to the primary IP config from the VMSS %s's network config : %v", vmssName, err)
errors = append(errors, err)
continue
}
@@ -1728,7 +2170,7 @@ func (ss *ScaleSet) EnsureBackendPoolDeletedFromVMSets(vmssNamesMap map[string]b
for i := len(loadBalancerBackendAddressPools) - 1; i >= 0; i-- {
curPool := loadBalancerBackendAddressPools[i]
if strings.EqualFold(backendPoolID, *curPool.ID) {
- klog.V(10).Infof("ensureBackendPoolDeletedFromVMSS gets unwanted backend pool %q for VMSS %s", backendPoolID, vmssName)
+ klog.V(10).Infof("EnsureBackendPoolDeletedFromVMSets gets unwanted backend pool %q for VMSS %s", backendPoolID, vmssName)
found = true
newBackendPools = append(loadBalancerBackendAddressPools[:i], loadBalancerBackendAddressPools[i+1:]...)
}
@@ -1751,10 +2193,10 @@ func (ss *ScaleSet) EnsureBackendPoolDeletedFromVMSets(vmssNamesMap map[string]b
},
}
- klog.V(2).Infof("ensureBackendPoolDeletedFromVMSS begins to update vmss(%s) with backendPoolID %s", vmssName, backendPoolID)
+ klog.V(2).Infof("EnsureBackendPoolDeletedFromVMSets begins to update vmss(%s) with backendPoolID %s", vmssName, backendPoolID)
rerr := ss.CreateOrUpdateVMSS(ss.ResourceGroup, vmssName, newVMSS)
if rerr != nil {
- klog.Errorf("ensureBackendPoolDeletedFromVMSS CreateOrUpdateVMSS(%s) with new backendPoolID %s, err: %v", vmssName, backendPoolID, rerr)
+ klog.Errorf("EnsureBackendPoolDeletedFromVMSets CreateOrUpdateVMSS(%s) with new backendPoolID %s, err: %v", vmssName, backendPoolID, rerr)
return rerr.Error()
}
@@ -1779,25 +2221,26 @@ func (ss *ScaleSet) EnsureBackendPoolDeletedFromVMSets(vmssNamesMap map[string]b
// like capz allows mixed instance type.
func (ss *ScaleSet) GetAgentPoolVMSetNames(nodes []*v1.Node) (*[]string, error) {
vmSetNames := make([]string, 0)
- as := ss.availabilitySet.(*availabilitySet)
+
+ vmssFlexVMNodes := make([]*v1.Node, 0)
+ avSetVMNodes := make([]*v1.Node, 0)
for _, node := range nodes {
var names *[]string
- managedByAS, err := ss.isNodeManagedByAvailabilitySet(node.Name, azcache.CacheReadTypeDefault)
+
+ vmManagementType, err := ss.getVMManagementTypeByNodeName(node.Name, azcache.CacheReadTypeDefault)
if err != nil {
- return nil, fmt.Errorf("GetAgentPoolVMSetNames: failed to check if the node %s is managed by VMAS: %w", node.Name, err)
+ return nil, fmt.Errorf("GetAgentPoolVMSetNames: failed to check the node %s management type: %w", node.Name, err)
}
- if managedByAS {
- cached, err := ss.availabilitySetNodesCache.Get(consts.AvailabilitySetNodesKey, azcache.CacheReadTypeDefault)
- if err != nil {
- return nil, fmt.Errorf("GetAgentPoolVMSetNames: failed to get availabilitySetNodesCache")
- }
- vms := cached.(availabilitySetNodeEntry).vms
- names, err = as.getAgentPoolAvailabilitySets(vms, []*v1.Node{node})
- if err != nil {
- return nil, fmt.Errorf("GetAgentPoolVMSetNames: failed to execute getAgentPoolAvailabilitySets: %w", err)
- }
- vmSetNames = append(vmSetNames, *names...)
+
+ if vmManagementType == ManagedByAvSet {
+ // vm is managed by availability set.
+ avSetVMNodes = append(avSetVMNodes, node)
+ continue
+ }
+ if vmManagementType == ManagedByVmssFlex {
+ // vm is managed by vmss flex.
+ vmssFlexVMNodes = append(vmssFlexVMNodes, node)
continue
}
@@ -1808,17 +2251,62 @@ func (ss *ScaleSet) GetAgentPoolVMSetNames(nodes []*v1.Node) (*[]string, error)
vmSetNames = append(vmSetNames, *names...)
}
+ if len(vmssFlexVMNodes) > 0 {
+ vmssFlexVMnames, err := ss.flexScaleSet.GetAgentPoolVMSetNames(vmssFlexVMNodes)
+ if err != nil {
+ return nil, fmt.Errorf("ss.flexScaleSet.GetAgentPoolVMSetNames: failed to execute : %w", err)
+ }
+ vmSetNames = append(vmSetNames, *vmssFlexVMnames...)
+ }
+
+ if len(avSetVMNodes) > 0 {
+ avSetVMnames, err := ss.availabilitySet.GetAgentPoolVMSetNames(avSetVMNodes)
+ if err != nil {
+ return nil, fmt.Errorf("ss.availabilitySet.GetAgentPoolVMSetNames: failed to execute : %w", err)
+ }
+ vmSetNames = append(vmSetNames, *avSetVMnames...)
+ }
+
return &vmSetNames, nil
}
func (ss *ScaleSet) GetNodeVMSetName(node *v1.Node) (string, error) {
+ vmManagementType, err := ss.getVMManagementTypeByNodeName(node.Name, azcache.CacheReadTypeUnsafe)
+ if err != nil {
+ klog.Errorf("Failed to check VM management type: %v", err)
+ return "", err
+ }
+
+ if vmManagementType == ManagedByAvSet {
+ // vm is managed by availability set.
+ return ss.availabilitySet.GetNodeVMSetName(node)
+ }
+ if vmManagementType == ManagedByVmssFlex {
+ // vm is managed by vmss flex.
+ return ss.flexScaleSet.GetNodeVMSetName(node)
+ }
+
providerID := node.Spec.ProviderID
_, vmssName, err := getVmssAndResourceGroupNameByVMProviderID(providerID)
if err != nil {
- klog.Warningf("ss.GetNodeVMSetName: the provider ID %s of node %s does not match the format of a VMSS instance, assuming it is managed by an availability set", providerID, node.Name)
- return ss.availabilitySet.GetNodeVMSetName(node)
+ klog.Errorf("getVmssAndResourceGroupNameByVMProviderID failed: %v", err)
+ return "", err
}
klog.V(4).Infof("ss.GetNodeVMSetName: found vmss name %s from node name %s", vmssName, node.Name)
return vmssName, nil
}
+
+// VMSSBatchSize returns the batch size for VMSS operations.
+func (ss *ScaleSet) VMSSBatchSize(vmssName string) (int, error) {
+ batchSize := 0
+ vmss, err := ss.getVMSS(vmssName, azcache.CacheReadTypeDefault)
+ if err != nil {
+ return 0, fmt.Errorf("get vmss batch size: %w", err)
+ }
+ if _, ok := vmss.Tags[consts.VMSSTagForBatchOperation]; ok {
+ batchSize = ss.getPutVMSSVMBatchSize()
+ }
+ klog.V(2).InfoS("Fetch VMSS batch size", "vmss", vmssName, "size", batchSize)
+ return batchSize, nil
+}
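
VMSSBatchSize only enables batched PUTs for scale sets that opt in through a tag; everything else gets batch size 0 and falls back to sequential updates. A sketch under the assumption that consts.VMSSTagForBatchOperation is a plain tag key (the key shown here is made up):

```go
package main

import "fmt"

// batchSizeFor mimics VMSSBatchSize: a batch size is returned only
// when the scale set opts in via a tag; otherwise 0 disables batching.
func batchSizeFor(tags map[string]*string, configured int) int {
	// "enable-vmss-batch-operation" is an illustrative tag key.
	if _, ok := tags["enable-vmss-batch-operation"]; ok {
		return configured
	}
	return 0
}

func main() {
	on := map[string]*string{"enable-vmss-batch-operation": nil}
	fmt.Println(batchSizeFor(on, 50))  // 50: batching enabled
	fmt.Println(batchSizeFor(nil, 50)) // 0: sequential updates
}
```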
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_cache.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_cache.go
index ebf3591f8a2a..47ff879cb783 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_cache.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_cache.go
@@ -23,7 +23,7 @@ import (
"sync"
"time"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/go-autorest/autorest/to"
"k8s.io/apimachinery/pkg/util/sets"
@@ -33,38 +33,55 @@ import (
"sigs.k8s.io/cloud-provider-azure/pkg/consts"
)
-type vmssVirtualMachinesEntry struct {
- resourceGroup string
- vmssName string
- instanceID string
- virtualMachine *compute.VirtualMachineScaleSetVM
- lastUpdate time.Time
+type VMSSVirtualMachineEntry struct {
+ ResourceGroup string
+ VMSSName string
+ InstanceID string
+ VirtualMachine *compute.VirtualMachineScaleSetVM
+ LastUpdate time.Time
}
-type vmssEntry struct {
- vmss *compute.VirtualMachineScaleSet
- resourceGroup string
- lastUpdate time.Time
+type VMSSEntry struct {
+ VMSS *compute.VirtualMachineScaleSet
+ ResourceGroup string
+ LastUpdate time.Time
}
-type availabilitySetNodeEntry struct {
- vmNames sets.String
- nodeNames sets.String
- vms []compute.VirtualMachine
+type NonVmssUniformNodesEntry struct {
+ VMSSFlexVMNodeNames sets.String
+ VMSSFlexVMProviderIDs sets.String
+ AvSetVMNodeNames sets.String
+ AvSetVMProviderIDs sets.String
+ ClusterNodeNames sets.String
}
-func (ss *ScaleSet) newVMSSCache() (*azcache.TimedCache, error) {
+type VMManagementType string
+
+const (
+ ManagedByVmssUniform VMManagementType = "ManagedByVmssUniform"
+ ManagedByVmssFlex VMManagementType = "ManagedByVmssFlex"
+ ManagedByAvSet VMManagementType = "ManagedByAvSet"
+ ManagedByUnknownVMSet VMManagementType = "ManagedByUnknownVMSet"
+)
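
Every entry point on ScaleSet now starts by classifying the node and delegating to the matching backing implementation, as the methods further down in this diff show. A minimal sketch of that three-way dispatch on VMManagementType:

```go
package main

import "fmt"

type VMManagementType string

const (
	ManagedByVmssUniform  VMManagementType = "ManagedByVmssUniform"
	ManagedByVmssFlex     VMManagementType = "ManagedByVmssFlex"
	ManagedByAvSet        VMManagementType = "ManagedByAvSet"
	ManagedByUnknownVMSet VMManagementType = "ManagedByUnknownVMSet"
)

// dispatch routes to the VMSet implementation owning the node.
func dispatch(t VMManagementType) (string, error) {
	switch t {
	case ManagedByAvSet:
		return "availabilitySet path", nil
	case ManagedByVmssFlex:
		return "flexScaleSet path", nil
	case ManagedByVmssUniform:
		return "uniform scale set path", nil
	default:
		return "", fmt.Errorf("unknown management type %q", t)
	}
}

func main() {
	for _, t := range []VMManagementType{ManagedByAvSet, ManagedByVmssFlex, ManagedByVmssUniform} {
		path, _ := dispatch(t)
		fmt.Println(t, "->", path)
	}
}
```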
+
+func (ss *ScaleSet) newVMSSCache(ctx context.Context) (*azcache.TimedCache, error) {
getter := func(key string) (interface{}, error) {
- localCache := &sync.Map{} // [vmasName]*vmssEntry
+ localCache := &sync.Map{} // [vmssName]*vmssEntry
allResourceGroups, err := ss.GetResourceGroups()
if err != nil {
return nil, err
}
+ resourceGroupNotFound := false
for _, resourceGroup := range allResourceGroups.List() {
- allScaleSets, rerr := ss.VirtualMachineScaleSetsClient.List(context.Background(), resourceGroup)
+ allScaleSets, rerr := ss.VirtualMachineScaleSetsClient.List(ctx, resourceGroup)
if rerr != nil {
+ if rerr.IsNotFound() {
+ klog.Warningf("Skip caching vmss for resource group %s due to error: %v", resourceGroup, rerr.Error())
+ resourceGroupNotFound = true
+ continue
+ }
klog.Errorf("VirtualMachineScaleSetsClient.List failed: %v", rerr)
return nil, rerr.Error()
}
@@ -75,14 +92,27 @@ func (ss *ScaleSet) newVMSSCache() (*azcache.TimedCache, error) {
klog.Warning("failed to get the name of VMSS")
continue
}
- localCache.Store(*scaleSet.Name, &vmssEntry{
- vmss: &scaleSet,
- resourceGroup: resourceGroup,
- lastUpdate: time.Now().UTC(),
- })
+ if scaleSet.OrchestrationMode == "" || scaleSet.OrchestrationMode == compute.Uniform {
+ localCache.Store(*scaleSet.Name, &VMSSEntry{
+ VMSS: &scaleSet,
+ ResourceGroup: resourceGroup,
+ LastUpdate: time.Now().UTC(),
+ })
+ }
}
}
+ if resourceGroupNotFound {
+ // gc the vmss vm cache when a resource group is not found
+ vmssVMKeys := ss.vmssVMCache.Store.ListKeys()
+ for _, cacheKey := range vmssVMKeys {
+ vmssName := cacheKey[strings.LastIndex(cacheKey, "/")+1:]
+ if _, ok := localCache.Load(vmssName); !ok {
+ klog.V(2).Infof("remove vmss %s from vmssVMCache due to rg not found", cacheKey)
+ _ = ss.vmssVMCache.Delete(cacheKey)
+ }
+ }
+ }
return localCache, nil
}
@@ -106,75 +136,59 @@ func extractVmssVMName(name string) (string, string, error) {
return ssName, instanceID, nil
}
-// getVMSSVMCache returns an *azcache.TimedCache and cache key for a VMSS (creating that cache if new).
-func (ss *ScaleSet) getVMSSVMCache(resourceGroup, vmssName string) (string, *azcache.TimedCache, error) {
- cacheKey := strings.ToLower(fmt.Sprintf("%s/%s", resourceGroup, vmssName))
- if entry, ok := ss.vmssVMCache.Load(cacheKey); ok {
- cache := entry.(*azcache.TimedCache)
- return cacheKey, cache, nil
+func (ss *ScaleSet) getVMSSVMsFromCache(resourceGroup, vmssName string, crt azcache.AzureCacheReadType) (*sync.Map, error) {
+ cacheKey := getVMSSVMCacheKey(resourceGroup, vmssName)
+ entry, err := ss.vmssVMCache.Get(cacheKey, crt)
+ if err != nil {
+ return nil, err
}
- cache, err := ss.newVMSSVirtualMachinesCache(resourceGroup, vmssName, cacheKey)
- if err != nil {
- return "", nil, err
+ if entry == nil {
+ err = fmt.Errorf("vmssVMCache entry for resourceGroup (%s), vmssName (%s) returned nil data", resourceGroup, vmssName)
+ return nil, err
}
- ss.vmssVMCache.Store(cacheKey, cache)
- return cacheKey, cache, nil
+
+ virtualMachines := entry.(*sync.Map)
+ return virtualMachines, nil
}
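
getVMSSVMCacheKey itself is outside this hunk; judging from the inline code it replaces and from the strings.Split in the new getter below, the key is the lowercased resourceGroup/vmssName pair. A sketch of composing and parsing such a key, under that assumption:

```go
package main

import (
	"fmt"
	"strings"
)

// cacheKeyFor mirrors the presumed getVMSSVMCacheKey behavior:
// "resourceGroup/vmssName", lowercased.
func cacheKeyFor(resourceGroup, vmssName string) string {
	return strings.ToLower(fmt.Sprintf("%s/%s", resourceGroup, vmssName))
}

// parseCacheKey inverts cacheKeyFor, as the getter in
// newVMSSVirtualMachinesCache does with strings.Split.
func parseCacheKey(key string) (rg, vmss string, err error) {
	parts := strings.Split(key, "/")
	if len(parts) < 2 {
		return "", "", fmt.Errorf("invalid cacheKey (%s)", key)
	}
	return parts[0], parts[1], nil
}

func main() {
	key := cacheKeyFor("MC_rg", "aks-nodepool1-vmss")
	fmt.Println(key) // mc_rg/aks-nodepool1-vmss
	rg, vmss, _ := parseCacheKey(key)
	fmt.Println(rg, vmss)
}
```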
// gcVMSSVMCache delete stale VMSS VMs caches from deleted VMSSes.
func (ss *ScaleSet) gcVMSSVMCache() error {
- cached, err := ss.vmssCache.Get(consts.VMSSKey, azcache.CacheReadTypeUnsafe)
- if err != nil {
- return err
- }
-
- vmsses := cached.(*sync.Map)
- removed := map[string]bool{}
- ss.vmssVMCache.Range(func(key, value interface{}) bool {
- cacheKey := key.(string)
- vlistIdx := cacheKey[strings.LastIndex(cacheKey, "/")+1:]
- if _, ok := vmsses.Load(vlistIdx); !ok {
- removed[cacheKey] = true
- }
- return true
- })
-
- for key := range removed {
- ss.vmssVMCache.Delete(key)
- }
-
- return nil
+ return ss.vmssCache.Delete(consts.VMSSKey)
}
// newVMSSVirtualMachinesCache instantiates a new VMs cache for VMs belonging to the provided VMSS.
-func (ss *ScaleSet) newVMSSVirtualMachinesCache(resourceGroupName, vmssName, cacheKey string) (*azcache.TimedCache, error) {
+func (ss *ScaleSet) newVMSSVirtualMachinesCache() (*azcache.TimedCache, error) {
vmssVirtualMachinesCacheTTL := time.Duration(ss.Config.VmssVirtualMachinesCacheTTLInSeconds) * time.Second
- getter := func(key string) (interface{}, error) {
- localCache := &sync.Map{} // [nodeName]*vmssVirtualMachinesEntry
+ getter := func(cacheKey string) (interface{}, error) {
+ localCache := &sync.Map{} // [nodeName]*VMSSVirtualMachineEntry
- oldCache := make(map[string]vmssVirtualMachinesEntry)
+ oldCache := make(map[string]*VMSSVirtualMachineEntry)
- if vmssCache, ok := ss.vmssVMCache.Load(cacheKey); ok {
- // get old cache before refreshing the cache
- cache := vmssCache.(*azcache.TimedCache)
- entry, exists, err := cache.Store.GetByKey(cacheKey)
- if err != nil {
- return nil, err
- }
- if exists {
- cached := entry.(*azcache.AzureCacheEntry).Data
- if cached != nil {
- virtualMachines := cached.(*sync.Map)
- virtualMachines.Range(func(key, value interface{}) bool {
- oldCache[key.(string)] = *value.(*vmssVirtualMachinesEntry)
- return true
- })
- }
+ entry, exists, err := ss.vmssVMCache.Store.GetByKey(cacheKey)
+ if err != nil {
+ return nil, err
+ }
+ if exists {
+ cached := entry.(*azcache.AzureCacheEntry).Data
+ if cached != nil {
+ virtualMachines := cached.(*sync.Map)
+ virtualMachines.Range(func(key, value interface{}) bool {
+ oldCache[key.(string)] = value.(*VMSSVirtualMachineEntry)
+ return true
+ })
}
}
+ result := strings.Split(cacheKey, "/")
+ if len(result) < 2 {
+ err = fmt.Errorf("Invalid cacheKey (%s)", cacheKey)
+ return nil, err
+ }
+
+ resourceGroupName, vmssName := result[0], result[1]
+
vms, err := ss.listScaleSetVMs(vmssName, resourceGroupName)
if err != nil {
return nil, err
@@ -193,18 +207,18 @@ func (ss *ScaleSet) newVMSSVirtualMachinesCache(resourceGroupName, vmssName, cac
continue
}
- vmssVMCacheEntry := &vmssVirtualMachinesEntry{
- resourceGroup: resourceGroupName,
- vmssName: vmssName,
- instanceID: to.String(vm.InstanceID),
- virtualMachine: &vm,
- lastUpdate: time.Now().UTC(),
+ vmssVMCacheEntry := &VMSSVirtualMachineEntry{
+ ResourceGroup: resourceGroupName,
+ VMSSName: vmssName,
+ InstanceID: to.String(vm.InstanceID),
+ VirtualMachine: &vm,
+ LastUpdate: time.Now().UTC(),
}
// set cache entry to nil when the VM is under deleting.
if vm.VirtualMachineScaleSetVMProperties != nil &&
strings.EqualFold(to.String(vm.VirtualMachineScaleSetVMProperties.ProvisioningState), string(compute.ProvisioningStateDeleting)) {
klog.V(4).Infof("VMSS virtualMachine %q is under deleting, setting its cache to nil", computerName)
- vmssVMCacheEntry.virtualMachine = nil
+ vmssVMCacheEntry.VirtualMachine = nil
}
localCache.Store(computerName, vmssVMCacheEntry)
@@ -216,24 +230,24 @@ func (ss *ScaleSet) newVMSSVirtualMachinesCache(resourceGroupName, vmssName, cac
for name, vmEntry := range oldCache {
// if the nil cache entry has existed for vmssVirtualMachinesCacheTTL in the cache
// then it should not be added back to the cache
- if vmEntry.virtualMachine == nil && time.Since(vmEntry.lastUpdate) > vmssVirtualMachinesCacheTTL {
+ if vmEntry.VirtualMachine == nil && time.Since(vmEntry.LastUpdate) > vmssVirtualMachinesCacheTTL {
klog.V(5).Infof("ignoring expired entries from old cache for %s", name)
continue
}
- lastUpdate := time.Now().UTC()
- if vmEntry.virtualMachine == nil {
+ lastUpdate := time.Now().UTC()
+ if vmEntry.VirtualMachine == nil {
// if this is already a nil entry then keep the time the nil
// entry was first created, so we can cleanup unwanted entries
- lastUpdate = vmEntry.lastUpdate
+ lastUpdate = vmEntry.LastUpdate
}
klog.V(5).Infof("adding old entries to new cache for %s", name)
- localCache.Store(name, &vmssVirtualMachinesEntry{
- resourceGroup: vmEntry.resourceGroup,
- vmssName: vmEntry.vmssName,
- instanceID: vmEntry.instanceID,
- virtualMachine: nil,
- lastUpdate: lastUpdate,
+ localCache.Store(name, &VMSSVirtualMachineEntry{
+ ResourceGroup: vmEntry.ResourceGroup,
+ VMSSName: vmEntry.VMSSName,
+ InstanceID: vmEntry.InstanceID,
+ VirtualMachine: nil,
+ LastUpdate: lastUpdate,
})
}
@@ -243,52 +257,108 @@ func (ss *ScaleSet) newVMSSVirtualMachinesCache(resourceGroupName, vmssName, cac
return azcache.NewTimedcache(vmssVirtualMachinesCacheTTL, getter)
}
-func (ss *ScaleSet) deleteCacheForNode(nodeName string) error {
- node, err := ss.getNodeIdentityByNodeName(nodeName, azcache.CacheReadTypeUnsafe)
+// DeleteCacheForNode deletes Node from VMSS VM and VM caches.
+func (ss *ScaleSet) DeleteCacheForNode(nodeName string) error {
+ vmManagementType, err := ss.getVMManagementTypeByNodeName(nodeName, azcache.CacheReadTypeUnsafe)
if err != nil {
- klog.Errorf("deleteCacheForNode(%s) failed with error: %v", nodeName, err)
+ klog.Errorf("Failed to check VM management type: %v", err)
return err
}
- cacheKey, timedcache, err := ss.getVMSSVMCache(node.resourceGroup, node.vmssName)
+ if vmManagementType == ManagedByAvSet {
+ // vm is managed by availability set.
+ return ss.availabilitySet.DeleteCacheForNode(nodeName)
+ }
+ if vmManagementType == ManagedByVmssFlex {
+ // vm is managed by vmss flex.
+ return ss.flexScaleSet.DeleteCacheForNode(nodeName)
+ }
+
+ node, err := ss.getNodeIdentityByNodeName(nodeName, azcache.CacheReadTypeUnsafe)
if err != nil {
- klog.Errorf("deleteCacheForNode(%s) failed with error: %v", nodeName, err)
+ klog.Errorf("DeleteCacheForNode(%s) failed with error: %v", nodeName, err)
return err
}
- vmcache, err := timedcache.Get(cacheKey, azcache.CacheReadTypeUnsafe)
+ err = ss.vmssVMCache.Delete(getVMSSVMCacheKey(node.resourceGroup, node.vmssName))
if err != nil {
- klog.Errorf("deleteCacheForNode(%s) failed with error: %v", nodeName, err)
+ klog.Errorf("DeleteCacheForNode(%s) failed to remove from vmssVMCache with error: %v", nodeName, err)
return err
}
- virtualMachines := vmcache.(*sync.Map)
- virtualMachines.Delete(nodeName)
if err := ss.gcVMSSVMCache(); err != nil {
- klog.Errorf("deleteCacheForNode(%s) failed to gc stale vmss caches: %v", nodeName, err)
+ klog.Errorf("DeleteCacheForNode(%s) failed to gc stale vmss caches: %v", nodeName, err)
+ }
+
+ return nil
+}
+
+func (ss *ScaleSet) updateCache(nodeName, resourceGroupName, vmssName, instanceID string, updatedVM *compute.VirtualMachineScaleSetVM) error {
+ // lock the VMSS entry to ensure a consistent view of the VM map when there are concurrent updates.
+ cacheKey := getVMSSVMCacheKey(resourceGroupName, vmssName)
+ ss.lockMap.LockEntry(cacheKey)
+ defer ss.lockMap.UnlockEntry(cacheKey)
+
+ virtualMachines, err := ss.getVMSSVMsFromCache(resourceGroupName, vmssName, azcache.CacheReadTypeUnsafe)
+ if err != nil {
+ err = fmt.Errorf("updateCache(%s, %s, %s) failed getting vmCache with error: %w", vmssName, resourceGroupName, nodeName, err)
+ return err
}
+ localCache := &sync.Map{}
+
+ vmssVMCacheEntry := &VMSSVirtualMachineEntry{
+ ResourceGroup: resourceGroupName,
+ VMSSName: vmssName,
+ InstanceID: instanceID,
+ VirtualMachine: updatedVM,
+ LastUpdate: time.Now().UTC(),
+ }
+
+ localCache.Store(nodeName, vmssVMCacheEntry)
+
+ virtualMachines.Range(func(key, value interface{}) bool {
+ if key.(string) != nodeName {
+ localCache.Store(key.(string), value.(*VMSSVirtualMachineEntry))
+ }
+ return true
+ })
+
+ ss.vmssVMCache.Update(cacheKey, localCache)
+ klog.V(4).Infof("updateCache(%s, %s, %s) for cacheKey(%s) updated successfully", vmssName, resourceGroupName, nodeName, cacheKey)
return nil
}
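
updateCache never mutates the cached *sync.Map in place: it builds a fresh map with the updated entry plus everything else copied over, so readers always observe a complete snapshot, and the lockMap entry serializes concurrent writers for the same key. A simplified sketch with a plain sync.Mutex standing in for the lockMap:

```go
package main

import (
	"fmt"
	"sync"
)

var mu sync.Mutex // stands in for lockMap.LockEntry(cacheKey)

// replaceEntry returns a fresh *sync.Map with nodeName mapped to the
// updated value and every other entry carried over unchanged.
func replaceEntry(old *sync.Map, nodeName string, updated interface{}) *sync.Map {
	mu.Lock()
	defer mu.Unlock()

	fresh := &sync.Map{}
	fresh.Store(nodeName, updated)
	old.Range(func(key, value interface{}) bool {
		if key.(string) != nodeName {
			fresh.Store(key, value)
		}
		return true
	})
	return fresh
}

func main() {
	vms := &sync.Map{}
	vms.Store("node-a", "v1")
	vms.Store("node-b", "v1")

	vms = replaceEntry(vms, "node-a", "v2")
	vms.Range(func(k, v interface{}) bool {
		fmt.Println(k, v)
		return true
	})
}
```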
-func (ss *ScaleSet) newAvailabilitySetNodesCache() (*azcache.TimedCache, error) {
+func (ss *ScaleSet) newNonVmssUniformNodesCache() (*azcache.TimedCache, error) {
getter := func(key string) (interface{}, error) {
- vmNames := sets.NewString()
+ vmssFlexVMNodeNames := sets.NewString()
+ vmssFlexVMProviderIDs := sets.NewString()
+ avSetVMNodeNames := sets.NewString()
+ avSetVMProviderIDs := sets.NewString()
resourceGroups, err := ss.GetResourceGroups()
if err != nil {
return nil, err
}
+ klog.V(2).Infof("refresh the cache of NonVmssUniformNodesCache in rg %v", resourceGroups)
- vmList := make([]compute.VirtualMachine, 0)
for _, resourceGroup := range resourceGroups.List() {
vms, err := ss.Cloud.ListVirtualMachines(resourceGroup)
if err != nil {
- return nil, fmt.Errorf("newAvailabilitySetNodesCache: failed to list vms in the resource group %s: %w", resourceGroup, err)
+ return nil, fmt.Errorf("getter function of nonVmssUniformNodesCache: failed to list vms in the resource group %s: %w", resourceGroup, err)
}
for _, vm := range vms {
- if vm.Name != nil {
- vmNames.Insert(to.String(vm.Name))
- vmList = append(vmList, vm)
+ if vm.OsProfile != nil && vm.OsProfile.ComputerName != nil {
+ if vm.VirtualMachineScaleSet != nil {
+ vmssFlexVMNodeNames.Insert(strings.ToLower(to.String(vm.OsProfile.ComputerName)))
+ if vm.ID != nil {
+ vmssFlexVMProviderIDs.Insert(ss.ProviderName() + "://" + to.String(vm.ID))
+ }
+ } else {
+ avSetVMNodeNames.Insert(strings.ToLower(to.String(vm.OsProfile.ComputerName)))
+ if vm.ID != nil {
+ avSetVMProviderIDs.Insert(ss.ProviderName() + "://" + to.String(vm.ID))
+ }
+ }
}
}
}
@@ -299,43 +369,131 @@ func (ss *ScaleSet) newAvailabilitySetNodesCache() (*azcache.TimedCache, error)
return nil, err
}
- localCache := availabilitySetNodeEntry{
- vmNames: vmNames,
- nodeNames: nodeNames,
- vms: vmList,
+ localCache := NonVmssUniformNodesEntry{
+ VMSSFlexVMNodeNames: vmssFlexVMNodeNames,
+ VMSSFlexVMProviderIDs: vmssFlexVMProviderIDs,
+ AvSetVMNodeNames: avSetVMNodeNames,
+ AvSetVMProviderIDs: avSetVMProviderIDs,
+ ClusterNodeNames: nodeNames,
}
return localCache, nil
}
- if ss.Config.AvailabilitySetNodesCacheTTLInSeconds == 0 {
- ss.Config.AvailabilitySetNodesCacheTTLInSeconds = consts.AvailabilitySetNodesCacheTTLDefaultInSeconds
+ if ss.Config.NonVmssUniformNodesCacheTTLInSeconds == 0 {
+ ss.Config.NonVmssUniformNodesCacheTTLInSeconds = consts.NonVmssUniformNodesCacheTTLDefaultInSeconds
}
- return azcache.NewTimedcache(time.Duration(ss.Config.AvailabilitySetNodesCacheTTLInSeconds)*time.Second, getter)
+ return azcache.NewTimedcache(time.Duration(ss.Config.NonVmssUniformNodesCacheTTLInSeconds)*time.Second, getter)
}
-func (ss *ScaleSet) isNodeManagedByAvailabilitySet(nodeName string, crt azcache.AzureCacheReadType) (bool, error) {
- // Assume all nodes are managed by VMSS when DisableAvailabilitySetNodes is enabled.
- if ss.DisableAvailabilitySetNodes {
- klog.V(6).Infof("Assuming node %q is managed by VMSS since DisableAvailabilitySetNodes is set to true", nodeName)
- return false, nil
+func (ss *ScaleSet) getVMManagementTypeByNodeName(nodeName string, crt azcache.AzureCacheReadType) (VMManagementType, error) {
+ if ss.DisableAvailabilitySetNodes && !ss.EnableVmssFlexNodes {
+ return ManagedByVmssUniform, nil
}
-
- cached, err := ss.availabilitySetNodesCache.Get(consts.AvailabilitySetNodesKey, crt)
+ ss.lockMap.LockEntry(consts.VMManagementTypeLockKey)
+ defer ss.lockMap.UnlockEntry(consts.VMManagementTypeLockKey)
+ cached, err := ss.nonVmssUniformNodesCache.Get(consts.NonVmssUniformNodesKey, crt)
if err != nil {
- return false, err
+ return ManagedByUnknownVMSet, err
}
- cachedNodes := cached.(availabilitySetNodeEntry).nodeNames
+ cachedNodes := cached.(NonVmssUniformNodesEntry).ClusterNodeNames
// if the node is not in the cache, assume the node has joined after the last cache refresh and attempt to refresh the cache.
if !cachedNodes.Has(nodeName) {
- klog.V(2).Infof("Node %s has joined the cluster since the last VM cache refresh, refreshing the cache", nodeName)
- cached, err = ss.availabilitySetNodesCache.Get(consts.AvailabilitySetNodesKey, azcache.CacheReadTypeForceRefresh)
+ if cached.(NonVmssUniformNodesEntry).AvSetVMNodeNames.Has(nodeName) {
+ return ManagedByAvSet, nil
+ }
+
+ if cached.(NonVmssUniformNodesEntry).VMSSFlexVMNodeNames.Has(nodeName) {
+ return ManagedByVmssFlex, nil
+ }
+
+ if isNodeInVMSSVMCache(nodeName, ss.vmssVMCache) {
+ return ManagedByVmssUniform, nil
+ }
+
+ klog.V(2).Infof("Node %s has joined the cluster since the last VM cache refresh in NonVmssUniformNodesEntry, refreshing the cache", nodeName)
+ cached, err = ss.nonVmssUniformNodesCache.Get(consts.NonVmssUniformNodesKey, azcache.CacheReadTypeForceRefresh)
if err != nil {
- return false, err
+ return ManagedByUnknownVMSet, err
}
}
- cachedVMs := cached.(availabilitySetNodeEntry).vmNames
- return cachedVMs.Has(nodeName), nil
+ cachedAvSetVMs := cached.(NonVmssUniformNodesEntry).AvSetVMNodeNames
+ cachedVmssFlexVMs := cached.(NonVmssUniformNodesEntry).VMSSFlexVMNodeNames
+
+ if cachedAvSetVMs.Has(nodeName) {
+ return ManagedByAvSet, nil
+ }
+ if cachedVmssFlexVMs.Has(nodeName) {
+ return ManagedByVmssFlex, nil
+ }
+
+ return ManagedByVmssUniform, nil
+}
+
+func (ss *ScaleSet) getVMManagementTypeByProviderID(providerID string, crt azcache.AzureCacheReadType) (VMManagementType, error) {
+ if ss.DisableAvailabilitySetNodes && !ss.EnableVmssFlexNodes {
+ return ManagedByVmssUniform, nil
+ }
+ _, err := extractScaleSetNameByProviderID(providerID)
+ if err == nil {
+ return ManagedByVmssUniform, nil
+ }
+
+ ss.lockMap.LockEntry(consts.VMManagementTypeLockKey)
+ defer ss.lockMap.UnlockEntry(consts.VMManagementTypeLockKey)
+ cached, err := ss.nonVmssUniformNodesCache.Get(consts.NonVmssUniformNodesKey, crt)
+ if err != nil {
+ return ManagedByUnknownVMSet, err
+ }
+
+ cachedVmssFlexVMProviderIDs := cached.(NonVmssUniformNodesEntry).VMSSFlexVMProviderIDs
+ cachedAvSetVMProviderIDs := cached.(NonVmssUniformNodesEntry).AvSetVMProviderIDs
+
+ if cachedAvSetVMProviderIDs.Has(providerID) {
+ return ManagedByAvSet, nil
+ }
+ if cachedVmssFlexVMProviderIDs.Has(providerID) {
+ return ManagedByVmssFlex, nil
+ }
+ return ManagedByUnknownVMSet, fmt.Errorf("getVMManagementTypeByProviderID : failed to check the providerID %s management type", providerID)
+
+}
+
+func (ss *ScaleSet) getVMManagementTypeByIPConfigurationID(ipConfigurationID string, crt azcache.AzureCacheReadType) (VMManagementType, error) {
+ if ss.DisableAvailabilitySetNodes && !ss.EnableVmssFlexNodes {
+ return ManagedByVmssUniform, nil
+ }
+
+ _, _, err := getScaleSetAndResourceGroupNameByIPConfigurationID(ipConfigurationID)
+ if err == nil {
+ return ManagedByVmssUniform, nil
+ }
+
+ ss.lockMap.LockEntry(consts.VMManagementTypeLockKey)
+ defer ss.lockMap.UnlockEntry(consts.VMManagementTypeLockKey)
+ cached, err := ss.nonVmssUniformNodesCache.Get(consts.NonVmssUniformNodesKey, crt)
+ if err != nil {
+ return ManagedByUnknownVMSet, err
+ }
+
+ matches := nicIDRE.FindStringSubmatch(ipConfigurationID)
+ if len(matches) != 3 {
+ return ManagedByUnknownVMSet, fmt.Errorf("can not extract nic name from ipConfigurationID (%s)", ipConfigurationID)
+ }
+
+ nicResourceGroup, nicName := matches[1], matches[2]
+ if nicResourceGroup == "" || nicName == "" {
+ return ManagedByUnknownVMSet, fmt.Errorf("invalid ip config ID %s", ipConfigurationID)
+ }
+
+ vmName := strings.Replace(nicName, "-nic", "", 1)
+
+ cachedAvSetVMs := cached.(NonVmssUniformNodesEntry).AvSetVMNodeNames
+
+ if cachedAvSetVMs.Has(vmName) {
+ return ManagedByAvSet, nil
+ }
+ return ManagedByVmssFlex, nil
}
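
The fallback branch above derives a VM name from the NIC name by stripping a single "-nic" substring and then consults the availability-set name set; anything unmatched is assumed to be vmss flex. A sketch of that heuristic (the regexp here is illustrative, not the vendored nicIDRE):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Illustrative pattern for a NIC-scoped ipConfigurationID; the
// vendored nicIDRE may differ in detail.
var nicIDPattern = regexp.MustCompile(
	`/resourceGroups/([^/]+)/providers/Microsoft\.Network/networkInterfaces/([^/]+)/ipConfigurations/`)

func main() {
	id := "/subscriptions/sub/resourceGroups/rg1/providers/Microsoft.Network/networkInterfaces/vm-0-nic/ipConfigurations/ipconfig1"

	matches := nicIDPattern.FindStringSubmatch(id)
	if len(matches) != 3 {
		fmt.Println("cannot extract nic name")
		return
	}
	rg, nicName := matches[1], matches[2]
	vmName := strings.Replace(nicName, "-nic", "", 1)
	fmt.Println(rg, nicName, "->", vmName) // rg1 vm-0-nic -> vm-0
}
```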
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmssflex.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmssflex.go
new file mode 100644
index 000000000000..a2e301d2e3fc
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmssflex.go
@@ -0,0 +1,1067 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-08-01/network"
+ "github.com/Azure/go-autorest/autorest/to"
+ v1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/types"
+ utilerrors "k8s.io/apimachinery/pkg/util/errors"
+ cloudprovider "k8s.io/cloud-provider"
+ "k8s.io/klog/v2"
+ utilnet "k8s.io/utils/net"
+
+ azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
+ "sigs.k8s.io/cloud-provider-azure/pkg/consts"
+ "sigs.k8s.io/cloud-provider-azure/pkg/metrics"
+)
+
+var (
+ // ErrorVmssIDIsEmpty indicates the vmss id is empty.
+ ErrorVmssIDIsEmpty = errors.New("VMSS ID is empty")
+)
+
+// FlexScaleSet implements VMSet interface for Azure Flexible VMSS.
+type FlexScaleSet struct {
+ *Cloud
+
+ vmssFlexCache *azcache.TimedCache
+
+ vmssFlexVMNameToVmssID *sync.Map
+ vmssFlexVMNameToNodeName *sync.Map
+ vmssFlexVMCache *azcache.TimedCache
+
+ // lockMap in cache refresh
+ lockMap *lockMap
+}
+
+func newFlexScaleSet(ctx context.Context, az *Cloud) (VMSet, error) {
+ fs := &FlexScaleSet{
+ Cloud: az,
+ vmssFlexVMNameToVmssID: &sync.Map{},
+ vmssFlexVMNameToNodeName: &sync.Map{},
+ lockMap: newLockMap(),
+ }
+
+ var err error
+ fs.vmssFlexCache, err = fs.newVmssFlexCache(ctx)
+ if err != nil {
+ return nil, err
+ }
+ fs.vmssFlexVMCache, err = fs.newVmssFlexVMCache(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ return fs, nil
+}
+
+// GetPrimaryVMSetName returns the VM set name depending on the configured vmType.
+// It returns config.PrimaryScaleSetName for vmss and config.PrimaryAvailabilitySetName for standard vmType.
+func (fs *FlexScaleSet) GetPrimaryVMSetName() string {
+ return fs.Config.PrimaryScaleSetName
+}
+
+// getNodeVmssFlexName returns the vmss flex name by the node name.
+func (fs *FlexScaleSet) getNodeVmssFlexName(nodeName string) (string, error) {
+ vmssFlexID, err := fs.getNodeVmssFlexID(nodeName)
+ if err != nil {
+ return "", err
+ }
+ vmssFlexName, err := getLastSegment(vmssFlexID, "/")
+ if err != nil {
+ return "", err
+ }
+ return vmssFlexName, nil
+
+}
+
+// GetNodeVMSetName returns the availability set or vmss name by the node name.
+// It will return empty string when using standalone vms.
+func (fs *FlexScaleSet) GetNodeVMSetName(node *v1.Node) (string, error) {
+ return fs.getNodeVmssFlexName(node.Name)
+}
+
+// GetAgentPoolVMSetNames returns all vmSet names according to the nodes
+func (fs *FlexScaleSet) GetAgentPoolVMSetNames(nodes []*v1.Node) (*[]string, error) {
+ vmSetNames := make([]string, 0)
+ for _, node := range nodes {
+ vmSetName, err := fs.GetNodeVMSetName(node)
+ if err != nil {
+ klog.Errorf("Unable to get the vmss flex name by node name %s: %v", node.Name, err)
+ continue
+ }
+ vmSetNames = append(vmSetNames, vmSetName)
+ }
+ return &vmSetNames, nil
+}
+
+// GetVMSetNames selects all possible availability sets or scale sets
+// (depending vmType configured) for service load balancer, if the service has
+// no loadbalancer mode annotation returns the primary VMSet. If service annotation
+// for loadbalancer exists then returns the eligible VMSet. The mode selection
+// annotation would be ignored when using one SLB per cluster.
+func (fs *FlexScaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (*[]string, error) {
+ hasMode, isAuto, serviceVMSetName := fs.getServiceLoadBalancerMode(service)
+ useSingleSLB := fs.useStandardLoadBalancer() && !fs.EnableMultipleStandardLoadBalancers
+ if !hasMode || useSingleSLB {
+ // no mode specified in service annotation or use single SLB mode
+ // default to PrimaryScaleSetName
+ vmssFlexNames := &[]string{fs.Config.PrimaryScaleSetName}
+ return vmssFlexNames, nil
+ }
+
+ vmssFlexNames, err := fs.GetAgentPoolVMSetNames(nodes)
+ if err != nil {
+ klog.Errorf("fs.GetVMSetNames - GetAgentPoolVMSetNames failed err=(%v)", err)
+ return nil, err
+ }
+
+ if !isAuto {
+ found := false
+ for asx := range *vmssFlexNames {
+ if strings.EqualFold((*vmssFlexNames)[asx], serviceVMSetName) {
+ found = true
+ serviceVMSetName = (*vmssFlexNames)[asx]
+ break
+ }
+ }
+ if !found {
+ klog.Errorf("fs.GetVMSetNames - scale set (%s) in service annotation not found", serviceVMSetName)
+ return nil, fmt.Errorf("scale set (%s) - not found", serviceVMSetName)
+ }
+ return &[]string{serviceVMSetName}, nil
+ }
+ return vmssFlexNames, nil
+}
+
+// GetNodeNameByProviderID gets the node name by provider ID.
+// providerID example:
+// azure:///subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/flexprofile-mp-0_df53ee36
+// Unlike vmas, where the vm name always equals the nodeName, vmssflex requires a further mapping from vmName to the actual nodeName.
+// Note: nodeName is always equal to strings.ToLower(*vm.OsProfile.ComputerName)
+func (fs *FlexScaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
+ // NodeName is part of providerID for standard instances.
+ matches := providerIDRE.FindStringSubmatch(providerID)
+ if len(matches) != 2 {
+ return "", errors.New("error splitting providerID")
+ }
+
+ nodeName, err := fs.getNodeNameByVMName(matches[1])
+ if err != nil {
+ return "", err
+ }
+ return types.NodeName(nodeName), nil
+}
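
providerIDRE is defined elsewhere in the package; the doc comment above fixes the expected shape of a vmss flex provider ID. A hedged sketch of the extraction step, with an illustrative pattern in place of the real one:

```go
package main

import (
	"fmt"
	"regexp"
)

// Illustrative stand-in for providerIDRE: capture everything after
// the final "/virtualMachines/" segment.
var providerIDPattern = regexp.MustCompile(`/virtualMachines/(.+)$`)

func main() {
	providerID := "azure:///subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/flexprofile-mp-0_df53ee36"

	matches := providerIDPattern.FindStringSubmatch(providerID)
	if len(matches) != 2 {
		fmt.Println("error splitting providerID")
		return
	}
	// The VM name still has to be mapped to the node name, i.e.
	// strings.ToLower(*vm.OsProfile.ComputerName), via the cache.
	fmt.Println("vm name:", matches[1])
}
```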
+
+// GetInstanceIDByNodeName gets the cloud provider ID by node name.
+// It must return ("", cloudprovider.InstanceNotFound) if the instance does
+// not exist or is no longer running.
+func (fs *FlexScaleSet) GetInstanceIDByNodeName(name string) (string, error) {
+ machine, err := fs.getVmssFlexVM(name, azcache.CacheReadTypeUnsafe)
+ if err != nil {
+ return "", err
+ }
+ if machine.ID == nil {
+ return "", fmt.Errorf("ProviderID of node(%s) is nil", name)
+ }
+ resourceID := *machine.ID
+ convertedResourceID, err := ConvertResourceGroupNameToLower(resourceID)
+ if err != nil {
+ klog.Errorf("ConvertResourceGroupNameToLower failed with error: %v", err)
+ return "", err
+ }
+ return convertedResourceID, nil
+
+}
+
+// GetInstanceTypeByNodeName gets the instance type by node name.
+func (fs *FlexScaleSet) GetInstanceTypeByNodeName(name string) (string, error) {
+ machine, err := fs.getVmssFlexVM(name, azcache.CacheReadTypeUnsafe)
+ if err != nil {
+ klog.Errorf("fs.GetInstanceTypeByNodeName(%s) failed: fs.getVmssFlexVMWithoutInstanceView(%s) err=%v", name, name, err)
+ return "", err
+ }
+
+ if machine.HardwareProfile == nil {
+ return "", fmt.Errorf("HardwareProfile of node(%s) is nil", name)
+ }
+ return string(machine.HardwareProfile.VMSize), nil
+}
+
+// GetZoneByNodeName gets availability zone for the specified node. If the node is not running
+// with availability zone, then it returns fault domain.
+// for details, refer to https://kubernetes-sigs.github.io/cloud-provider-azure/topics/availability-zones/#node-labels
+func (fs *FlexScaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
+ vm, err := fs.getVmssFlexVM(name, azcache.CacheReadTypeUnsafe)
+ if err != nil {
+ klog.Errorf("fs.GetZoneByNodeName(%s) failed: fs.getVmssFlexVMWithoutInstanceView(%s) err=%v", name, name, err)
+ return cloudprovider.Zone{}, err
+ }
+
+ var failureDomain string
+ if vm.Zones != nil && len(*vm.Zones) > 0 {
+ // Get availability zone for the node.
+ zones := *vm.Zones
+ zoneID, err := strconv.Atoi(zones[0])
+ if err != nil {
+ return cloudprovider.Zone{}, fmt.Errorf("failed to parse zone %q: %w", zones, err)
+ }
+
+ failureDomain = fs.makeZone(to.String(vm.Location), zoneID)
+ } else if vm.VirtualMachineProperties.InstanceView != nil && vm.VirtualMachineProperties.InstanceView.PlatformFaultDomain != nil {
+ // Availability zone is not used for the node, falling back to fault domain.
+ failureDomain = strconv.Itoa(int(to.Int32(vm.VirtualMachineProperties.InstanceView.PlatformFaultDomain)))
+ } else {
+ err = fmt.Errorf("failed to get zone info")
+ klog.Errorf("GetZoneByNodeName: got unexpected error %v", err)
+ return cloudprovider.Zone{}, err
+ }
+
+ zone := cloudprovider.Zone{
+ FailureDomain: strings.ToLower(failureDomain),
+ Region: strings.ToLower(to.String(vm.Location)),
+ }
+ return zone, nil
+}
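
makeZone is also defined elsewhere; in cloud-provider-azure it is understood to render "<lowercased location>-<zone id>" labels such as eastus2-1, with the numeric fault domain as the zone-less fallback. A sketch of the branch logic under that assumption:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// makeZone is assumed to format "<lowercased location>-<zone id>",
// matching labels like "eastus2-1".
func makeZone(location string, zoneID int) string {
	return fmt.Sprintf("%s-%d", strings.ToLower(location), zoneID)
}

// failureDomain mirrors the zone-vs-fault-domain branching above.
func failureDomain(location string, zones []string, platformFaultDomain *int32) (string, error) {
	if len(zones) > 0 {
		zoneID, err := strconv.Atoi(zones[0])
		if err != nil {
			return "", fmt.Errorf("failed to parse zone %q: %w", zones, err)
		}
		return makeZone(location, zoneID), nil
	}
	if platformFaultDomain != nil {
		return strconv.Itoa(int(*platformFaultDomain)), nil
	}
	return "", fmt.Errorf("failed to get zone info")
}

func main() {
	fd := int32(2)
	fmt.Println(failureDomain("EastUS2", []string{"1"}, nil)) // eastus2-1
	fmt.Println(failureDomain("EastUS2", nil, &fd))           // 2
}
```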
+
+// GetProvisioningStateByNodeName returns the provisioningState for the specified node.
+func (fs *FlexScaleSet) GetProvisioningStateByNodeName(name string) (provisioningState string, err error) {
+ vm, err := fs.getVmssFlexVM(name, azcache.CacheReadTypeDefault)
+ if err != nil {
+ return provisioningState, err
+ }
+
+ if vm.VirtualMachineProperties == nil || vm.VirtualMachineProperties.ProvisioningState == nil {
+ return provisioningState, nil
+ }
+
+ return to.String(vm.VirtualMachineProperties.ProvisioningState), nil
+}
+
+// GetPowerStatusByNodeName returns the powerState for the specified node.
+func (fs *FlexScaleSet) GetPowerStatusByNodeName(name string) (powerState string, err error) {
+ vm, err := fs.getVmssFlexVM(name, azcache.CacheReadTypeDefault)
+ if err != nil {
+ return powerState, err
+ }
+
+ if vm.InstanceView != nil && vm.InstanceView.Statuses != nil {
+ statuses := *vm.InstanceView.Statuses
+ for _, status := range statuses {
+ state := to.String(status.Code)
+ if strings.HasPrefix(state, vmPowerStatePrefix) {
+ return strings.TrimPrefix(state, vmPowerStatePrefix), nil
+ }
+ }
+ }
+
+ // vm.InstanceView or vm.InstanceView.Statuses are nil when the VM is being deleted.
+ klog.V(3).Infof("InstanceView for node %q is nil, assuming it's stopped", name)
+ return vmPowerStateStopped, nil
+}
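
Instance-view status codes mix provisioning and power information; GetPowerStatusByNodeName picks out the code carrying the "PowerState/" prefix and treats a missing instance view as stopped. A sketch assuming those prefix and fallback values (vmPowerStatePrefix and vmPowerStateStopped are defined outside this diff):

```go
package main

import (
	"fmt"
	"strings"
)

const (
	powerStatePrefix  = "PowerState/" // assumed value of vmPowerStatePrefix
	powerStateStopped = "stopped"     // assumed value of vmPowerStateStopped
)

// powerState scans instance-view status codes such as
// "ProvisioningState/succeeded" and "PowerState/running".
func powerState(statusCodes []string) string {
	for _, code := range statusCodes {
		if strings.HasPrefix(code, powerStatePrefix) {
			return strings.TrimPrefix(code, powerStatePrefix)
		}
	}
	// Mirrors the nil-instance-view fallback above.
	return powerStateStopped
}

func main() {
	fmt.Println(powerState([]string{"ProvisioningState/succeeded", "PowerState/running"})) // running
	fmt.Println(powerState(nil))                                                           // stopped
}
```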
+
+// GetPrimaryInterface gets machine primary network interface by node name.
+func (fs *FlexScaleSet) GetPrimaryInterface(nodeName string) (network.Interface, error) {
+ machine, err := fs.getVmssFlexVM(nodeName, azcache.CacheReadTypeDefault)
+ if err != nil {
+ klog.Errorf("fs.GetInstanceTypeByNodeName(%s) failed: fs.getVmssFlexVMWithoutInstanceView(%s) err=%v", nodeName, nodeName, err)
+ return network.Interface{}, err
+ }
+
+ primaryNicID, err := getPrimaryInterfaceID(machine)
+ if err != nil {
+ return network.Interface{}, err
+ }
+ nicName, err := getLastSegment(primaryNicID, "/")
+ if err != nil {
+ return network.Interface{}, err
+ }
+
+ nicResourceGroup, err := extractResourceGroupByNicID(primaryNicID)
+ if err != nil {
+ return network.Interface{}, err
+ }
+
+ ctx, cancel := getContextWithCancel()
+ defer cancel()
+ nic, rerr := fs.InterfacesClient.Get(ctx, nicResourceGroup, nicName, "")
+ if rerr != nil {
+ return network.Interface{}, rerr.Error()
+ }
+
+ return nic, nil
+}
+
+// GetIPByNodeName gets machine private IP and public IP by node name.
+func (fs *FlexScaleSet) GetIPByNodeName(name string) (string, string, error) {
+ nic, err := fs.GetPrimaryInterface(name)
+ if err != nil {
+ return "", "", err
+ }
+
+ ipConfig, err := getPrimaryIPConfig(nic)
+ if err != nil {
+ klog.Errorf("fs.GetIPByNodeName(%s) failed: getPrimaryIPConfig(%v), err=%v", name, nic, err)
+ return "", "", err
+ }
+
+ privateIP := *ipConfig.PrivateIPAddress
+ publicIP := ""
+ if ipConfig.PublicIPAddress != nil && ipConfig.PublicIPAddress.ID != nil {
+ pipID := *ipConfig.PublicIPAddress.ID
+ pipName, err := getLastSegment(pipID, "/")
+ if err != nil {
+ return "", "", fmt.Errorf("failed to publicIP name for node %q with pipID %q", name, pipID)
+ }
+ pip, existsPip, err := fs.getPublicIPAddress(fs.ResourceGroup, pipName, azcache.CacheReadTypeDefault)
+ if err != nil {
+ return "", "", err
+ }
+ if existsPip {
+ publicIP = *pip.IPAddress
+ }
+ }
+
+ return privateIP, publicIP, nil
+
+}
+
+// GetPrivateIPsByNodeName returns a slice of all private ips assigned to node (ipv6 and ipv4)
+// TODO (khenidak): This should read all nics, not just the primary
+// allowing users to split ipv4/v6 on multiple nics
+func (fs *FlexScaleSet) GetPrivateIPsByNodeName(name string) ([]string, error) {
+ ips := make([]string, 0)
+ nic, err := fs.GetPrimaryInterface(name)
+ if err != nil {
+ return ips, err
+ }
+
+ if nic.IPConfigurations == nil {
+ return ips, fmt.Errorf("nic.IPConfigurations for nic (nicname=%s) is nil", *nic.Name)
+ }
+
+ for _, ipConfig := range *(nic.IPConfigurations) {
+ if ipConfig.PrivateIPAddress != nil {
+ ips = append(ips, *(ipConfig.PrivateIPAddress))
+ }
+ }
+
+ return ips, nil
+}
+
+// GetNodeNameByIPConfigurationID gets the nodeName and vmSetName by IP configuration ID.
+func (fs *FlexScaleSet) GetNodeNameByIPConfigurationID(ipConfigurationID string) (string, string, error) {
+ nodeName, vmssFlexName, _, err := fs.getNodeInformationByIPConfigurationID(ipConfigurationID)
+ if err != nil {
+ klog.Errorf("fs.GetNodeNameByIPConfigurationID(%s) failed. Error: %v", ipConfigurationID, err)
+ return "", "", err
+ }
+
+ return nodeName, strings.ToLower(vmssFlexName), nil
+}
+
+func (fs *FlexScaleSet) getNodeInformationByIPConfigurationID(ipConfigurationID string) (string, string, string, error) {
+ matches := nicIDRE.FindStringSubmatch(ipConfigurationID)
+ if len(matches) != 3 {
+ klog.V(4).Infof("Can not extract nic name from ipConfigurationID (%s)", ipConfigurationID)
+ return "", "", "", fmt.Errorf("invalid ip config ID %s", ipConfigurationID)
+ }
+
+ nicResourceGroup, nicName := matches[1], matches[2]
+ if nicResourceGroup == "" || nicName == "" {
+ return "", "", "", fmt.Errorf("invalid ip config ID %s", ipConfigurationID)
+ }
+
+ // get vmName by nic name
+ ctx, cancel := getContextWithCancel()
+ defer cancel()
+ nic, rerr := fs.InterfacesClient.Get(ctx, nicResourceGroup, nicName, "")
+ if rerr != nil {
+ return "", "", "", fmt.Errorf("getNodeInformationByIPConfigurationID(%s): failed to get interface of name %s: %w", ipConfigurationID, nicName, rerr.Error())
+ }
+ if nic.InterfacePropertiesFormat == nil || nic.InterfacePropertiesFormat.VirtualMachine == nil || nic.InterfacePropertiesFormat.VirtualMachine.ID == nil {
+ return "", "", "", fmt.Errorf("failed to get vm ID of ip config ID %s", ipConfigurationID)
+ }
+ vmID := to.String(nic.InterfacePropertiesFormat.VirtualMachine.ID)
+ matches = vmIDRE.FindStringSubmatch(vmID)
+ if len(matches) != 2 {
+ return "", "", "", fmt.Errorf("invalid virtual machine ID %s", vmID)
+ }
+ vmName := matches[1]
+
+ nodeName, err := fs.getNodeNameByVMName(vmName)
+ if err != nil {
+ return "", "", "", fmt.Errorf("failed to map VM Name to NodeName: VM Name %s", vmName)
+ }
+
+ vmssFlexName, err := fs.getNodeVmssFlexName(nodeName)
+ if err != nil {
+ klog.Errorf("Unable to get the vmss flex name by node name %s: %v", nodeName, err)
+ return "", "", "", err
+ }
+
+ return nodeName, strings.ToLower(vmssFlexName), nicName, nil
+}
+
+// GetNodeCIDRMasksByProviderID returns the IPv4 and IPv6 node CIDR subnet mask sizes by provider ID.
+func (fs *FlexScaleSet) GetNodeCIDRMasksByProviderID(providerID string) (int, int, error) {
+ nodeNameWrapper, err := fs.GetNodeNameByProviderID(providerID)
+ if err != nil {
+ klog.Errorf("Unable to get the vmss flex vm node name by providerID %s: %v", providerID, err)
+ return 0, 0, err
+ }
+ nodeName := mapNodeNameToVMName(nodeNameWrapper)
+
+ vmssFlex, err := fs.getVmssFlexByNodeName(nodeName, azcache.CacheReadTypeDefault)
+ if err != nil {
+ if errors.Is(err, cloudprovider.InstanceNotFound) {
+ return consts.DefaultNodeMaskCIDRIPv4, consts.DefaultNodeMaskCIDRIPv6, nil
+ }
+ return 0, 0, err
+ }
+
+ var ipv4Mask, ipv6Mask int
+ if v4, ok := vmssFlex.Tags[consts.VMSetCIDRIPV4TagKey]; ok && v4 != nil {
+ ipv4Mask, err = strconv.Atoi(to.String(v4))
+ if err != nil {
+ klog.Errorf("GetNodeCIDRMasksByProviderID: error when paring the value of the ipv4 mask size %s: %v", to.String(v4), err)
+ }
+ }
+ if v6, ok := vmssFlex.Tags[consts.VMSetCIDRIPV6TagKey]; ok && v6 != nil {
+ ipv6Mask, err = strconv.Atoi(to.String(v6))
+ if err != nil {
+ klog.Errorf("GetNodeCIDRMasksByProviderID: error when paring the value of the ipv6 mask size%s: %v", to.String(v6), err)
+ }
+ }
+
+ return ipv4Mask, ipv6Mask, nil
+}
+
+// EnsureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is
+// participating in the specified LoadBalancer Backend Pool, and returns (resourceGroup, vmssFlexName, instanceID, vmssVM, error).
+func (fs *FlexScaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetNameOfLB string) (string, string, string, *compute.VirtualMachineScaleSetVM, error) {
+ serviceName := getServiceName(service)
+ name := mapNodeNameToVMName(nodeName)
+ vmssFlexName, err := fs.getNodeVmssFlexName(name)
+ if err != nil {
+ klog.Errorf("EnsureHostInPool: failed to get VMSS Flex Name %s: %v", name, err)
+ return "", "", "", nil, nil
+ }
+
+ // Check scale set name:
+ // - For basic SKU load balancer, return error as VMSS Flex does not support basic load balancer.
+ // - For single standard SKU load balancer, backend could belong to multiple VMSS, so we
+ // don't check vmSet for it.
+ // - For multiple standard SKU load balancers, return nil if the node's scale set is mismatched with vmSetNameOfLB
+ needCheck := false
+ if !fs.useStandardLoadBalancer() {
+ return "", "", "", nil, fmt.Errorf("EnsureHostInPool: VMSS Flex does not support Basic Load Balancer")
+ }
+ if fs.EnableMultipleStandardLoadBalancers {
+ // need to check the vmSet name when using multiple standard LBs
+ needCheck = true
+
+ // ensure that the VM which is supposed to share the primary SLB is added to the backend pool of the primary SLB
+ if strings.EqualFold(fs.GetPrimaryVMSetName(), vmSetNameOfLB) &&
+ fs.getVMSetNamesSharingPrimarySLB().Has(strings.ToLower(vmssFlexName)) {
+ klog.V(4).Infof("EnsureHostInPool: the vm %s in the vmSet %s is supposed to share the primary SLB",
+ nodeName, vmssFlexName)
+ needCheck = false
+ }
+ }
+ if vmSetNameOfLB != "" && needCheck && !strings.EqualFold(vmSetNameOfLB, vmssFlexName) {
+ klog.V(3).Infof("EnsureHostInPool skips node %s because it is not in the ScaleSet %s", name, vmSetNameOfLB)
+ return "", "", "", nil, errNotInVMSet
+ }
+
+ nic, err := fs.GetPrimaryInterface(name)
+ if err != nil {
+ klog.Errorf("error: fs.EnsureHostInPool(%s), s.GetPrimaryInterface(%s), vmSetNameOfLB: %s, err=%v", name, name, vmSetNameOfLB, err)
+ return "", "", "", nil, err
+ }
+
+ if nic.ProvisioningState == consts.NicFailedState {
+ klog.Warningf("EnsureHostInPool skips node %s because its primary nic %s is in Failed state", nodeName, *nic.Name)
+ return "", "", "", nil, nil
+ }
+
+ var primaryIPConfig *network.InterfaceIPConfiguration
+ ipv6 := utilnet.IsIPv6String(service.Spec.ClusterIP)
+ if !fs.Cloud.ipv6DualStackEnabled && !ipv6 {
+ primaryIPConfig, err = getPrimaryIPConfig(nic)
+ if err != nil {
+ return "", "", "", nil, err
+ }
+ } else {
+ primaryIPConfig, err = getIPConfigByIPFamily(nic, ipv6)
+ if err != nil {
+ return "", "", "", nil, err
+ }
+ }
+
+ foundPool := false
+ newBackendPools := []network.BackendAddressPool{}
+ if primaryIPConfig.LoadBalancerBackendAddressPools != nil {
+ newBackendPools = *primaryIPConfig.LoadBalancerBackendAddressPools
+ }
+ for _, existingPool := range newBackendPools {
+ if strings.EqualFold(backendPoolID, *existingPool.ID) {
+ foundPool = true
+ break
+ }
+ }
+ // The backendPoolID has already been found from existing LoadBalancerBackendAddressPools.
+ if foundPool {
+ return "", "", "", nil, nil
+ }
+
+ if fs.useStandardLoadBalancer() && len(newBackendPools) > 0 {
+ // Although standard load balancer supports backends from multiple availability
+ // sets, the same network interface couldn't be added to more than one load balancer of
+ // the same type. Omit those nodes (e.g. masters) so Azure ARM won't complain
+ // about this.
+ newBackendPoolsIDs := make([]string, 0, len(newBackendPools))
+ for _, pool := range newBackendPools {
+ if pool.ID != nil {
+ newBackendPoolsIDs = append(newBackendPoolsIDs, *pool.ID)
+ }
+ }
+ isSameLB, oldLBName, err := isBackendPoolOnSameLB(backendPoolID, newBackendPoolsIDs)
+ if err != nil {
+ return "", "", "", nil, err
+ }
+ if !isSameLB {
+ klog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, oldLBName)
+ return "", "", "", nil, nil
+ }
+ }
+
+ newBackendPools = append(newBackendPools,
+ network.BackendAddressPool{
+ ID: to.StringPtr(backendPoolID),
+ })
+
+ primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools
+
+ nicName := *nic.Name
+ klog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName)
+ err = fs.CreateOrUpdateInterface(service, nic)
+ if err != nil {
+ return "", "", "", nil, err
+ }
+
+ // Get the node resource group.
+ nodeResourceGroup, err := fs.GetNodeResourceGroup(name)
+ if err != nil {
+ return "", "", "", nil, err
+ }
+
+ return nodeResourceGroup, vmssFlexName, name, nil, nil
+}
+
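+// ensureVMSSFlexInPool ensures the default VM profile of the target VMSS Flex joins the specified load balancer backend pool, so that newly created VMs are added to the pool automatically.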
+func (fs *FlexScaleSet) ensureVMSSFlexInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetNameOfLB string) error {
+ klog.V(2).Infof("ensureVMSSFlexInPool: ensuring VMSS Flex with backendPoolID %s", backendPoolID)
+ vmssFlexIDsMap := make(map[string]bool)
+
+ if !fs.useStandardLoadBalancer() {
+ return fmt.Errorf("ensureVMSSFlexInPool: VMSS Flex does not support Basic Load Balancer")
+ }
+
+ // the single standard load balancer supports multiple vmss in its backend while
+ // multiple standard load balancers doesn't
+ if fs.useStandardLoadBalancer() && !fs.EnableMultipleStandardLoadBalancers {
+ for _, node := range nodes {
+ if fs.excludeMasterNodesFromStandardLB() && isControlPlaneNode(node) {
+ continue
+ }
+
+ shouldExcludeLoadBalancer, err := fs.ShouldNodeExcludedFromLoadBalancer(node.Name)
+ if err != nil {
+ klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", node.Name, err)
+ return err
+ }
+ if shouldExcludeLoadBalancer {
+ klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", node.Name)
+ continue
+ }
+
+ // in this scenario the vmSetName is an empty string and the name of vmss should be obtained from the provider IDs of nodes
+ vmssFlexID, err := fs.getNodeVmssFlexID(node.Name)
+ if err != nil {
+ klog.Error("ensureVMSSFlexInPool: failed to get VMSS Flex ID of node: %s, will skip checking and continue", node.Name)
+ continue
+ }
+ resourceGroupName, err := fs.GetNodeResourceGroup(node.Name)
+ if err != nil {
+ klog.Error("ensureVMSSFlexInPool: failed to get resource group of node: %s, will skip checking and continue", node.Name)
+ continue
+ }
+
+ // only include VMSSes in the same resource group as the one configured in the Azure config
+ if strings.EqualFold(resourceGroupName, fs.ResourceGroup) {
+ vmssFlexIDsMap[vmssFlexID] = true
+ }
+ }
+ } else {
+ vmssFlexID, err := fs.getVmssFlexIDByName(vmSetNameOfLB)
+ if err != nil {
+ klog.Error("ensureVMSSFlexInPool: failed to get VMSS Flex ID of vmSet: %s, ", vmSetNameOfLB)
+ return err
+ }
+ vmssFlexIDsMap[vmssFlexID] = true
+ }
+
+ klog.V(2).Infof("ensureVMSSFlexInPool begins to update VMSS list %v with backendPoolID %s", vmssFlexIDsMap, backendPoolID)
+ for vmssFlexID := range vmssFlexIDsMap {
+ vmssFlex, err := fs.getVmssFlexByVmssFlexID(vmssFlexID, azcache.CacheReadTypeDefault)
+ if err != nil {
+ return err
+ }
+ vmssFlexName := *vmssFlex.Name
+
+ // When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error.
+ // Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it.
+ if vmssFlex.ProvisioningState != nil && strings.EqualFold(*vmssFlex.ProvisioningState, consts.VirtualMachineScaleSetsDeallocating) {
+ klog.V(3).Infof("ensureVMSSFlexInPool: found vmss %s being deleted, skipping", vmssFlexID)
+ continue
+ }
+
+ if vmssFlex.VirtualMachineProfile == nil || vmssFlex.VirtualMachineProfile.NetworkProfile == nil || vmssFlex.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations == nil {
+ klog.V(4).Infof("ensureVMSSFlexInPool: cannot obtain the primary network interface configuration of vmss %s, just skip it as it might not have default vm profile", vmssFlexID)
+ continue
+ }
+ vmssNIC := *vmssFlex.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations
+ primaryNIC, err := getPrimaryNetworkInterfaceConfigurationForScaleSet(vmssNIC, vmssFlexName)
+ if err != nil {
+ return err
+ }
+ var primaryIPConfig *compute.VirtualMachineScaleSetIPConfiguration
+ ipv6 := utilnet.IsIPv6String(service.Spec.ClusterIP)
+ // Find primary network interface configuration.
+ if !fs.Cloud.ipv6DualStackEnabled && !ipv6 {
+ // Find primary IP configuration.
+ primaryIPConfig, err = getPrimaryIPConfigFromVMSSNetworkConfig(primaryNIC)
+ if err != nil {
+ return err
+ }
+ } else {
+ primaryIPConfig, err = getConfigForScaleSetByIPFamily(primaryNIC, "", ipv6)
+ if err != nil {
+ return err
+ }
+ }
+
+ loadBalancerBackendAddressPools := []compute.SubResource{}
+ if primaryIPConfig.LoadBalancerBackendAddressPools != nil {
+ loadBalancerBackendAddressPools = *primaryIPConfig.LoadBalancerBackendAddressPools
+ }
+
+ var found bool
+ for _, loadBalancerBackendAddressPool := range loadBalancerBackendAddressPools {
+ if strings.EqualFold(*loadBalancerBackendAddressPool.ID, backendPoolID) {
+ found = true
+ break
+ }
+ }
+ if found {
+ continue
+ }
+
+ if fs.useStandardLoadBalancer() && len(loadBalancerBackendAddressPools) > 0 {
+ // Although standard load balancer supports backends from multiple scale
+ // sets, the same network interface couldn't be added to more than one load balancer of
+ // the same type. Omit those nodes (e.g. masters) so Azure ARM won't complain
+ // about this.
+ newBackendPoolsIDs := make([]string, 0, len(loadBalancerBackendAddressPools))
+ for _, pool := range loadBalancerBackendAddressPools {
+ if pool.ID != nil {
+ newBackendPoolsIDs = append(newBackendPoolsIDs, *pool.ID)
+ }
+ }
+ isSameLB, oldLBName, err := isBackendPoolOnSameLB(backendPoolID, newBackendPoolsIDs)
+ if err != nil {
+ return err
+ }
+ if !isSameLB {
+ klog.V(4).Infof("VMSS %q has already been added to LB %q, omit adding it to a new one", vmssFlexID, oldLBName)
+ return nil
+ }
+ }
+
+ // Compose a new vmss with added backendPoolID.
+ loadBalancerBackendAddressPools = append(loadBalancerBackendAddressPools,
+ compute.SubResource{
+ ID: to.StringPtr(backendPoolID),
+ })
+ primaryIPConfig.LoadBalancerBackendAddressPools = &loadBalancerBackendAddressPools
+ newVMSS := compute.VirtualMachineScaleSet{
+ Location: vmssFlex.Location,
+ VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{
+ VirtualMachineProfile: &compute.VirtualMachineScaleSetVMProfile{
+ NetworkProfile: &compute.VirtualMachineScaleSetNetworkProfile{
+ NetworkInterfaceConfigurations: &vmssNIC,
+ NetworkAPIVersion: compute.TwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne,
+ },
+ },
+ },
+ }
+
+ defer func() {
+ _ = fs.vmssFlexCache.Delete(consts.VmssFlexKey)
+ }()
+
+ klog.V(2).Infof("ensureVMSSFlexInPool begins to add vmss(%s) with new backendPoolID %s", vmssFlexName, backendPoolID)
+ rerr := fs.CreateOrUpdateVMSS(fs.ResourceGroup, vmssFlexName, newVMSS)
+ if rerr != nil {
+ klog.Errorf("ensureVMSSFlexInPool CreateOrUpdateVMSS(%s) with new backendPoolID %s, err: %v", vmssFlexName, backendPoolID, err)
+ return rerr.Error()
+ }
+ }
+ return nil
+}
+
+// EnsureHostsInPool ensures the given Node's primary IP configurations are
+// participating in the specified LoadBalancer Backend Pool.
+func (fs *FlexScaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetNameOfLB string) error {
+ mc := metrics.NewMetricContext("services", "vmssflex_ensure_hosts_in_pool", fs.ResourceGroup, fs.SubscriptionID, getServiceName(service))
+ isOperationSucceeded := false
+ defer func() {
+ mc.ObserveOperationWithResult(isOperationSucceeded)
+ }()
+ hostUpdates := make([]func() error, 0, len(nodes))
+
+ for _, node := range nodes {
+ localNodeName := node.Name
+ if fs.useStandardLoadBalancer() && fs.excludeMasterNodesFromStandardLB() && isControlPlaneNode(node) {
+ klog.V(4).Infof("Excluding master node %q from load balancer backendpool %q", localNodeName, backendPoolID)
+ continue
+ }
+
+ shouldExcludeLoadBalancer, err := fs.ShouldNodeExcludedFromLoadBalancer(localNodeName)
+ if err != nil {
+ klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", localNodeName, err)
+ return err
+ }
+ if shouldExcludeLoadBalancer {
+ klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName)
+ continue
+ }
+
+ f := func() error {
+ _, _, _, _, err := fs.EnsureHostInPool(service, types.NodeName(localNodeName), backendPoolID, vmSetNameOfLB)
+ if err != nil {
+ return fmt.Errorf("ensure(%s): backendPoolID(%s) - failed to ensure host in pool: %w", getServiceName(service), backendPoolID, err)
+ }
+ return nil
+ }
+ hostUpdates = append(hostUpdates, f)
+ }
+
+ errs := utilerrors.AggregateGoroutines(hostUpdates...)
+ if errs != nil {
+ return utilerrors.Flatten(errs)
+ }
+
+ err := fs.ensureVMSSFlexInPool(service, nodes, backendPoolID, vmSetNameOfLB)
+ if err != nil {
+ return err
+ }
+
+ isOperationSucceeded = true
+ return nil
+}
+
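+// ensureBackendPoolDeletedFromVmssFlex removes the backendPoolID from the VM profiles of the relevant VMSS Flex: all cached ones for a single standard load balancer, or only the named vmSet otherwise.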
+func (fs *FlexScaleSet) ensureBackendPoolDeletedFromVmssFlex(backendPoolID string, vmSetName string) error {
+ vmssNamesMap := make(map[string]bool)
+ if fs.useStandardLoadBalancer() && !fs.EnableMultipleStandardLoadBalancers {
+ cached, err := fs.vmssFlexCache.Get(consts.VmssFlexKey, azcache.CacheReadTypeDefault)
+ if err != nil {
+ klog.Errorf("ensureBackendPoolDeletedFromVmssFlex: failed to get vmss flex from cache: %v", err)
+ return err
+ }
+ vmssFlexes := cached.(*sync.Map)
+ vmssFlexes.Range(func(key, value interface{}) bool {
+ vmssFlex := value.(*compute.VirtualMachineScaleSet)
+ vmssNamesMap[to.String(vmssFlex.Name)] = true
+ return true
+ })
+ } else {
+ vmssNamesMap[vmSetName] = true
+ }
+ return fs.EnsureBackendPoolDeletedFromVMSets(vmssNamesMap, backendPoolID)
+}
+
+// EnsureBackendPoolDeletedFromVMSets ensures the loadBalancer backendAddressPools deleted from the specified VMSS Flex
+func (fs *FlexScaleSet) EnsureBackendPoolDeletedFromVMSets(vmssNamesMap map[string]bool, backendPoolID string) error {
+ vmssUpdaters := make([]func() error, 0, len(vmssNamesMap))
+ errors := make([]error, 0, len(vmssNamesMap))
+ for vmssName := range vmssNamesMap {
+ vmssName := vmssName
+ vmss, err := fs.getVmssFlexByName(vmssName)
+ if err != nil {
+ klog.Errorf("fs.EnsureBackendPoolDeletedFromVMSets: failed to get VMSS %s: %v", vmssName, err)
+ errors = append(errors, err)
+ continue
+ }
+
+ // When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error.
+ // Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it.
+ if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.VirtualMachineScaleSetsDeallocating) {
+ klog.V(3).Infof("fs.EnsureBackendPoolDeletedFromVMSets: found vmss %s being deleted, skipping", vmssName)
+ continue
+ }
+ if vmss.VirtualMachineProfile == nil || vmss.VirtualMachineProfile.NetworkProfile == nil || vmss.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations == nil {
+ klog.V(4).Infof("fs.EnsureBackendPoolDeletedFromVMSets: cannot obtain the primary network interface configurations, of vmss %s", vmssName)
+ continue
+ }
+ vmssNIC := *vmss.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations
+ primaryNIC, err := getPrimaryNetworkInterfaceConfigurationForScaleSet(vmssNIC, vmssName)
+ if err != nil {
+ klog.Errorf("fs.EnsureBackendPoolDeletedFromVMSets: failed to get the primary network interface config of the VMSS %s: %v", vmssName, err)
+ errors = append(errors, err)
+ continue
+ }
+ primaryIPConfig, err := getPrimaryIPConfigFromVMSSNetworkConfig(primaryNIC)
+ if err != nil {
+ klog.Errorf("fs.EnsureBackendPoolDeletedFromVMSets: failed to the primary IP config from the VMSS %s's network config : %v", vmssName, err)
+ errors = append(errors, err)
+ continue
+ }
+ loadBalancerBackendAddressPools := []compute.SubResource{}
+ if primaryIPConfig.LoadBalancerBackendAddressPools != nil {
+ loadBalancerBackendAddressPools = *primaryIPConfig.LoadBalancerBackendAddressPools
+ }
+
+ var found bool
+ var newBackendPools []compute.SubResource
+ for i := len(loadBalancerBackendAddressPools) - 1; i >= 0; i-- {
+ curPool := loadBalancerBackendAddressPools[i]
+ if strings.EqualFold(backendPoolID, *curPool.ID) {
+ klog.V(10).Infof("fs.EnsureBackendPoolDeletedFromVMSets gets unwanted backend pool %q for VMSS %s", backendPoolID, vmssName)
+ found = true
+ newBackendPools = append(loadBalancerBackendAddressPools[:i], loadBalancerBackendAddressPools[i+1:]...)
+ }
+ }
+ if !found {
+ continue
+ }
+
+ vmssUpdaters = append(vmssUpdaters, func() error {
+ // Compose a new vmss with the backendPoolID removed.
+ primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools
+ newVMSS := compute.VirtualMachineScaleSet{
+ Location: vmss.Location,
+ VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{
+ VirtualMachineProfile: &compute.VirtualMachineScaleSetVMProfile{
+ NetworkProfile: &compute.VirtualMachineScaleSetNetworkProfile{
+ NetworkInterfaceConfigurations: &vmssNIC,
+ NetworkAPIVersion: compute.TwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne,
+ },
+ },
+ },
+ }
+
+ defer func() {
+ _ = fs.vmssFlexCache.Delete(consts.VmssFlexKey)
+ }()
+
+ klog.V(2).Infof("fs.EnsureBackendPoolDeletedFromVMSets begins to delete backendPoolID %s from vmss(%s)", backendPoolID, vmssName)
+ rerr := fs.CreateOrUpdateVMSS(fs.ResourceGroup, vmssName, newVMSS)
+ if rerr != nil {
+ klog.Errorf("fs.EnsureBackendPoolDeletedFromVMSets CreateOrUpdateVMSS(%s) for backendPoolID %s, err: %v", vmssName, backendPoolID, rerr)
+ return rerr.Error()
+ }
+
+ return nil
+ })
+ }
+
+ errs := utilerrors.AggregateGoroutines(vmssUpdaters...)
+ if errs != nil {
+ return utilerrors.Flatten(errs)
+ }
+ // Fail if there are other errors.
+ if len(errors) > 0 {
+ return utilerrors.Flatten(utilerrors.NewAggregate(errors))
+ }
+
+ return nil
+}
+
+// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
+func (fs *FlexScaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) (bool, error) {
+ // Returns nil if backend address pools already deleted.
+ if backendAddressPools == nil {
+ return false, nil
+ }
+
+ mc := metrics.NewMetricContext("services", "vmssflex_ensure_backend_pool_deleted", fs.ResourceGroup, fs.SubscriptionID, getServiceName(service))
+ isOperationSucceeded := false
+ defer func() {
+ mc.ObserveOperationWithResult(isOperationSucceeded)
+ }()
+
+ ipConfigurationIDs := []string{}
+ for _, backendPool := range *backendAddressPools {
+ if strings.EqualFold(to.String(backendPool.ID), backendPoolID) && backendPool.BackendAddressPoolPropertiesFormat != nil && backendPool.BackendIPConfigurations != nil {
+ for _, ipConf := range *backendPool.BackendIPConfigurations {
+ if ipConf.ID == nil {
+ continue
+ }
+
+ ipConfigurationIDs = append(ipConfigurationIDs, *ipConf.ID)
+ }
+ }
+ }
+
+ vmssFlexVMNameMap := make(map[string]string)
+ allErrs := make([]error, 0)
+ for i := range ipConfigurationIDs {
+ ipConfigurationID := ipConfigurationIDs[i]
+ nodeName, vmssFlexName, nicName, err := fs.getNodeInformationByIPConfigurationID(ipConfigurationID)
+ if err != nil {
+ continue
+ }
+ if nodeName == "" {
+ continue
+ }
+ resourceGroupName, err := fs.GetNodeResourceGroup(nodeName)
+ if err != nil {
+ continue
+ }
+ // only include VMSSes in the same resource group as the one configured in the Azure config
+ if strings.EqualFold(resourceGroupName, fs.ResourceGroup) {
+ if fs.useStandardLoadBalancer() && !fs.EnableMultipleStandardLoadBalancers {
+ vmssFlexVMNameMap[nodeName] = nicName
+ } else {
+ if strings.EqualFold(vmssFlexName, vmSetName) {
+ vmssFlexVMNameMap[nodeName] = nicName
+ } else {
+ // Only remove nodes belonging to specified vmSet.
+ continue
+ }
+ }
+ }
+ }
+
+ // 1. Ensure the backendPoolID is deleted from the VMSS.
+ if deleteFromVMSet {
+ err := fs.ensureBackendPoolDeletedFromVmssFlex(backendPoolID, vmSetName)
+ if err != nil {
+ allErrs = append(allErrs, err)
+ }
+ }
+
+ klog.V(2).Infof("2. Ensure the backendPoolID is deleted from the VMSS VMs.")
+ // 2. Ensure the backendPoolID is deleted from the VMSS VMs.
+ klog.V(2).Infof("go into fs.ensureBackendPoolDeletedFromNode, vmssFlexVMNameMap: %s, size: %s", vmssFlexVMNameMap, len(vmssFlexVMNameMap))
+ nicUpdated, err := fs.ensureBackendPoolDeletedFromNode(vmssFlexVMNameMap, backendPoolID)
+ klog.V(2).Infof("exit from fs.ensureBackendPoolDeletedFromNode")
+ if err != nil {
+ allErrs = append(allErrs, err)
+ }
+
+ if len(allErrs) > 0 {
+ return nicUpdated, utilerrors.Flatten(utilerrors.NewAggregate(allErrs))
+ }
+
+ isOperationSucceeded = true
+ return nicUpdated, nil
+}
+
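+// ensureBackendPoolDeletedFromNode removes the backendPoolID from the primary IP configurations of the given nodes' NICs, and reports whether any NIC was updated.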
+func (fs *FlexScaleSet) ensureBackendPoolDeletedFromNode(vmssFlexVMNameMap map[string]string, backendPoolID string) (bool, error) {
+ nicUpdaters := make([]func() error, 0)
+ allErrs := make([]error, 0)
+ var nicUpdated bool
+ for nodeName, nicName := range vmssFlexVMNameMap {
+
+ ctx, cancel := getContextWithCancel()
+ defer cancel()
+ nic, rerr := fs.InterfacesClient.Get(ctx, fs.ResourceGroup, nicName, "")
+ if rerr != nil {
+ return false, fmt.Errorf("ensureBackendPoolDeletedFromNode: failed to get interface of name %s: %w", nicName, rerr.Error())
+ }
+
+ if nic.ProvisioningState == consts.NicFailedState {
+ klog.Warningf("EnsureBackendPoolDeleted skips node %s because its primary nic %s is in Failed state", nodeName, *nic.Name)
+ continue
+ }
+
+ if nic.InterfacePropertiesFormat != nil && nic.InterfacePropertiesFormat.IPConfigurations != nil {
+ newIPConfigs := *nic.IPConfigurations
+ for j, ipConf := range newIPConfigs {
+ if !to.Bool(ipConf.Primary) {
+ continue
+ }
+ // found primary ip configuration
+ if ipConf.LoadBalancerBackendAddressPools != nil {
+ newLBAddressPools := *ipConf.LoadBalancerBackendAddressPools
+ for k := len(newLBAddressPools) - 1; k >= 0; k-- {
+ pool := newLBAddressPools[k]
+ if strings.EqualFold(to.String(pool.ID), backendPoolID) {
+ newLBAddressPools = append(newLBAddressPools[:k], newLBAddressPools[k+1:]...)
+ break
+ }
+ }
+ newIPConfigs[j].LoadBalancerBackendAddressPools = &newLBAddressPools
+ }
+ }
+ nic.IPConfigurations = &newIPConfigs
+
+ nicUpdaters = append(nicUpdaters, func() error {
+ ctx, cancel := getContextWithCancel()
+ defer cancel()
+ klog.V(2).Infof("EnsureBackendPoolDeleted begins to CreateOrUpdate for NIC(%s, %s) with backendPoolID %s", fs.resourceGroup, to.String(nic.Name), backendPoolID)
+ rerr := fs.InterfacesClient.CreateOrUpdate(ctx, fs.ResourceGroup, to.String(nic.Name), nic)
+ if rerr != nil {
+ klog.Errorf("EnsureBackendPoolDeleted CreateOrUpdate for NIC(%s, %s) failed with error %v", fs.resourceGroup, to.String(nic.Name), rerr.Error())
+ return rerr.Error()
+ }
+ nicUpdated = true
+ klog.V(2).Infof("EnsureBackendPoolDeleted done")
+ return nil
+ })
+ }
+ }
+ klog.V(2).Infof("nicUpdaters size: %s", len(nicUpdaters))
+ errs := utilerrors.AggregateGoroutines(nicUpdaters...)
+ if errs != nil {
+ allErrs = append(allErrs, utilerrors.Flatten(errs))
+ }
+ if len(allErrs) > 0 {
+ return nicUpdated, utilerrors.Flatten(utilerrors.NewAggregate(allErrs))
+ }
+ return nicUpdated, nil
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmssflex_cache.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmssflex_cache.go
new file mode 100644
index 000000000000..efca4cb12eaf
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmssflex_cache.go
@@ -0,0 +1,333 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
+
+ cloudprovider "k8s.io/cloud-provider"
+ "k8s.io/klog/v2"
+
+ azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
+ "sigs.k8s.io/cloud-provider-azure/pkg/consts"
+)
+
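+// newVmssFlexCache builds a timed cache that maps VMSS resource IDs to scale sets, keeping only scale sets running in Flexible orchestration mode.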
+func (fs *FlexScaleSet) newVmssFlexCache(ctx context.Context) (*azcache.TimedCache, error) {
+ getter := func(key string) (interface{}, error) {
+ localCache := &sync.Map{}
+
+ allResourceGroups, err := fs.GetResourceGroups()
+ if err != nil {
+ return nil, err
+ }
+
+ for _, resourceGroup := range allResourceGroups.List() {
+ allScaleSets, rerr := fs.VirtualMachineScaleSetsClient.List(ctx, resourceGroup)
+ if rerr != nil {
+ if rerr.IsNotFound() {
+ klog.Warningf("Skip caching vmss for resource group %s due to error: %v", resourceGroup, rerr.Error())
+ continue
+ }
+ klog.Errorf("VirtualMachineScaleSetsClient.List failed: %v", rerr)
+ return nil, rerr.Error()
+ }
+
+ for i := range allScaleSets {
+ scaleSet := allScaleSets[i]
+ if scaleSet.ID == nil || *scaleSet.ID == "" {
+ klog.Warning("failed to get the ID of VMSS Flex")
+ continue
+ }
+
+ if scaleSet.OrchestrationMode == compute.Flexible {
+ localCache.Store(*scaleSet.ID, &scaleSet)
+ }
+ }
+ }
+
+ return localCache, nil
+ }
+
+ if fs.Config.VmssFlexCacheTTLInSeconds == 0 {
+ fs.Config.VmssFlexCacheTTLInSeconds = consts.VmssFlexCacheTTLDefaultInSeconds
+ }
+ return azcache.NewTimedcache(time.Duration(fs.Config.VmssFlexCacheTTLInSeconds)*time.Second, getter)
+}
+
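+// newVmssFlexVMCache builds a timed cache of the VMs in a VMSS Flex, keyed by lower-cased computer name, merging in the instance views fetched in a second listing call.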
+func (fs *FlexScaleSet) newVmssFlexVMCache(ctx context.Context) (*azcache.TimedCache, error) {
+ getter := func(key string) (interface{}, error) {
+ localCache := &sync.Map{}
+
+ vms, rerr := fs.VirtualMachinesClient.ListVmssFlexVMsWithoutInstanceView(ctx, key)
+ if rerr != nil {
+ klog.Errorf("ListVmssFlexVMsWithoutInstanceView failed: %v", rerr)
+ return nil, rerr.Error()
+ }
+
+ for i := range vms {
+ vm := vms[i]
+ if vm.OsProfile != nil && vm.OsProfile.ComputerName != nil {
+ localCache.Store(strings.ToLower(*vm.OsProfile.ComputerName), &vm)
+ fs.vmssFlexVMNameToVmssID.Store(strings.ToLower(*vm.OsProfile.ComputerName), key)
+ fs.vmssFlexVMNameToNodeName.Store(*vm.Name, strings.ToLower(*vm.OsProfile.ComputerName))
+ }
+ }
+
+ vms, rerr = fs.VirtualMachinesClient.ListVmssFlexVMsWithOnlyInstanceView(ctx, key)
+ if rerr != nil {
+ klog.Errorf("ListVmssFlexVMsWithOnlyInstanceView failed: %v", rerr)
+ return nil, rerr.Error()
+ }
+
+ for i := range vms {
+ vm := vms[i]
+ if vm.Name != nil {
+ nodeName, ok := fs.vmssFlexVMNameToNodeName.Load(*vm.Name)
+ if !ok {
+ continue
+ }
+
+ cached, ok := localCache.Load(nodeName)
+ if ok {
+ cachedVM := cached.(*compute.VirtualMachine)
+ cachedVM.VirtualMachineProperties.InstanceView = vm.VirtualMachineProperties.InstanceView
+ }
+ }
+ }
+
+ return localCache, nil
+ }
+
+ if fs.Config.VmssFlexVMCacheTTLInSeconds == 0 {
+ fs.Config.VmssFlexVMCacheTTLInSeconds = consts.VmssFlexVMCacheTTLDefaultInSeconds
+ }
+ return azcache.NewTimedcache(time.Duration(fs.Config.VmssFlexVMCacheTTLInSeconds)*time.Second, getter)
+}
+
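+// getNodeNameByVMName maps an Azure VM name to its node (computer) name, force-refreshing the VMSS Flex VM caches on a miss.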
+func (fs *FlexScaleSet) getNodeNameByVMName(vmName string) (string, error) {
+ fs.lockMap.LockEntry(consts.GetNodeVmssFlexIDLockKey)
+ defer fs.lockMap.UnlockEntry(consts.GetNodeVmssFlexIDLockKey)
+ cachedNodeName, isCached := fs.vmssFlexVMNameToNodeName.Load(vmName)
+ if isCached {
+ return fmt.Sprintf("%v", cachedNodeName), nil
+ }
+
+ getter := func(vmName string, crt azcache.AzureCacheReadType) (string, error) {
+ cached, err := fs.vmssFlexCache.Get(consts.VmssFlexKey, crt)
+ if err != nil {
+ return "", err
+ }
+ vmssFlexes := cached.(*sync.Map)
+
+ vmssFlexes.Range(func(key, value interface{}) bool {
+ vmssFlexID := key.(string)
+ _, err := fs.vmssFlexVMCache.Get(vmssFlexID, azcache.CacheReadTypeForceRefresh)
+ if err != nil {
+ klog.Errorf("failed to refresh vmss flex VM cache for vmssFlexID %s", vmssFlexID)
+ }
+ return true
+ })
+
+ cachedNodeName, isCached = fs.vmssFlexVMNameToNodeName.Load(vmName)
+ if isCached {
+ return fmt.Sprintf("%v", cachedNodeName), nil
+ }
+ return "", cloudprovider.InstanceNotFound
+ }
+
+ nodeName, err := getter(vmName, azcache.CacheReadTypeDefault)
+ if errors.Is(err, cloudprovider.InstanceNotFound) {
+ klog.V(2).Infof("Could not find node (%s) in the existing cache. Forcibly refreshing the cache to check again...", vmName)
+ return getter(vmName, azcache.CacheReadTypeForceRefresh)
+ }
+ return nodeName, err
+}
+
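+// getNodeVmssFlexID returns the resource ID of the VMSS Flex that the given node belongs to, force-refreshing the caches on a miss.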
+func (fs *FlexScaleSet) getNodeVmssFlexID(nodeName string) (string, error) {
+ fs.lockMap.LockEntry(consts.GetNodeVmssFlexIDLockKey)
+ defer fs.lockMap.UnlockEntry(consts.GetNodeVmssFlexIDLockKey)
+ cachedVmssFlexID, isCached := fs.vmssFlexVMNameToVmssID.Load(nodeName)
+
+ if isCached {
+ return fmt.Sprintf("%v", cachedVmssFlexID), nil
+ }
+
+ getter := func(nodeName string, crt azcache.AzureCacheReadType) (string, error) {
+ cached, err := fs.vmssFlexCache.Get(consts.VmssFlexKey, crt)
+ if err != nil {
+ return "", err
+ }
+ vmssFlexes := cached.(*sync.Map)
+
+ vmssFlexes.Range(func(key, value interface{}) bool {
+ vmssFlexID := key.(string)
+ _, err := fs.vmssFlexVMCache.Get(vmssFlexID, azcache.CacheReadTypeForceRefresh)
+ if err != nil {
+ klog.Errorf("failed to refresh vmss flex VM cache for vmssFlexID %s", vmssFlexID)
+ }
+ return true
+ })
+
+ cachedVmssFlexID, isCached = fs.vmssFlexVMNameToVmssID.Load(nodeName)
+ if isCached {
+ return fmt.Sprintf("%v", cachedVmssFlexID), nil
+ }
+ return "", cloudprovider.InstanceNotFound
+ }
+
+ vmssFlexID, err := getter(nodeName, azcache.CacheReadTypeDefault)
+ if errors.Is(err, cloudprovider.InstanceNotFound) {
+ klog.V(2).Infof("Could not find node (%s) in the existing cache. Forcibly refreshing the cache to check again...", nodeName)
+ return getter(nodeName, azcache.CacheReadTypeForceRefresh)
+ }
+ return vmssFlexID, err
+}
+
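+// getVmssFlexVM returns the cached virtual machine of the given node, or cloudprovider.InstanceNotFound if it is no longer in the cache.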
+func (fs *FlexScaleSet) getVmssFlexVM(nodeName string, crt azcache.AzureCacheReadType) (vm compute.VirtualMachine, err error) {
+ vmssFlexID, err := fs.getNodeVmssFlexID(nodeName)
+ if err != nil {
+ return vm, err
+ }
+
+ cached, err := fs.vmssFlexVMCache.Get(vmssFlexID, crt)
+ if err != nil {
+ return vm, err
+ }
+ vmMap := cached.(*sync.Map)
+ cachedVM, ok := vmMap.Load(nodeName)
+ if !ok {
+ klog.V(2).Infof("did not find node (%s) in the existing cache, which means it is deleted...", nodeName)
+ return vm, cloudprovider.InstanceNotFound
+ }
+
+ return *(cachedVM.(*compute.VirtualMachine)), nil
+}
+
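+// getVmssFlexByVmssFlexID returns the cached VMSS Flex with the given resource ID, force-refreshing the cache once before giving up.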
+func (fs *FlexScaleSet) getVmssFlexByVmssFlexID(vmssFlexID string, crt azcache.AzureCacheReadType) (*compute.VirtualMachineScaleSet, error) {
+ cached, err := fs.vmssFlexCache.Get(consts.VmssFlexKey, crt)
+ if err != nil {
+ return nil, err
+ }
+ vmssFlexes := cached.(*sync.Map)
+ if vmssFlex, ok := vmssFlexes.Load(vmssFlexID); ok {
+ result := vmssFlex.(*compute.VirtualMachineScaleSet)
+ return result, nil
+ }
+
+ klog.V(2).Infof("Couldn't find VMSS Flex with ID %s, refreshing the cache", vmssFlexID)
+ cached, err = fs.vmssFlexCache.Get(consts.VmssFlexKey, azcache.CacheReadTypeForceRefresh)
+ if err != nil {
+ return nil, err
+ }
+ vmssFlexes = cached.(*sync.Map)
+ if vmssFlex, ok := vmssFlexes.Load(vmssFlexID); ok {
+ result := vmssFlex.(*compute.VirtualMachineScaleSet)
+ return result, nil
+ }
+ return nil, cloudprovider.InstanceNotFound
+}
+
+func (fs *FlexScaleSet) getVmssFlexByNodeName(nodeName string, crt azcache.AzureCacheReadType) (*compute.VirtualMachineScaleSet, error) {
+ vmssFlexID, err := fs.getNodeVmssFlexID(nodeName)
+ if err != nil {
+ return nil, err
+ }
+ vmssFlex, err := fs.getVmssFlexByVmssFlexID(vmssFlexID, crt)
+ if err != nil {
+ return nil, err
+ }
+ return vmssFlex, nil
+}
+
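+// getVmssFlexIDByName returns the resource ID of the cached VMSS Flex whose name matches vmssFlexName (case-insensitively).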
+func (fs *FlexScaleSet) getVmssFlexIDByName(vmssFlexName string) (string, error) {
+ cached, err := fs.vmssFlexCache.Get(consts.VmssFlexKey, azcache.CacheReadTypeDefault)
+ if err != nil {
+ return "", err
+ }
+ var targetVmssFlexID string
+ vmssFlexes := cached.(*sync.Map)
+ vmssFlexes.Range(func(key, value interface{}) bool {
+ vmssFlexID := key.(string)
+ name, err := getLastSegment(vmssFlexID, "/")
+ if err != nil {
+ return true
+ }
+ if strings.EqualFold(name, vmssFlexName) {
+ targetVmssFlexID = vmssFlexID
+ return false
+ }
+ return true
+ })
+ if targetVmssFlexID != "" {
+ return targetVmssFlexID, nil
+ }
+ return "", cloudprovider.InstanceNotFound
+}
+
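+// getVmssFlexByName returns the cached VMSS Flex whose name matches vmssFlexName (case-insensitively).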
+func (fs *FlexScaleSet) getVmssFlexByName(vmssFlexName string) (*compute.VirtualMachineScaleSet, error) {
+ cached, err := fs.vmssFlexCache.Get(consts.VmssFlexKey, azcache.CacheReadTypeDefault)
+ if err != nil {
+ return nil, err
+ }
+
+ var targetVmssFlex *compute.VirtualMachineScaleSet
+ vmssFlexes := cached.(*sync.Map)
+ vmssFlexes.Range(func(key, value interface{}) bool {
+ vmssFlexID := key.(string)
+ vmssFlex := value.(*compute.VirtualMachineScaleSet)
+ name, err := getLastSegment(vmssFlexID, "/")
+ if err != nil {
+ return true
+ }
+ if strings.EqualFold(name, vmssFlexName) {
+ targetVmssFlex = vmssFlex
+ return false
+ }
+ return true
+ })
+ if targetVmssFlex != nil {
+ return targetVmssFlex, nil
+ }
+ return nil, cloudprovider.InstanceNotFound
+}
+
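+// DeleteCacheForNode removes the given node from the VMSS Flex VM cache and from the VM-name-to-VMSS-ID map.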
+func (fs *FlexScaleSet) DeleteCacheForNode(nodeName string) error {
+ vmssFlexID, err := fs.getNodeVmssFlexID(nodeName)
+ if err != nil {
+ return err
+ }
+
+ cached, err := fs.vmssFlexVMCache.Get(vmssFlexID, azcache.CacheReadTypeDefault)
+ if err != nil {
+ return err
+ }
+ vmMap := cached.(*sync.Map)
+ vmMap.Delete(nodeName)
+
+ fs.vmssFlexVMNameToVmssID.Delete(nodeName)
+
+ return nil
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_wrap.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_wrap.go
index 6f82d0c128db..05e965ea83d1 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_wrap.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_wrap.go
@@ -23,7 +23,7 @@ import (
"strings"
"time"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-08-01/network"
"github.com/Azure/go-autorest/autorest/to"
@@ -63,9 +63,9 @@ func checkResourceExistsFromError(err *retry.Error) (bool, *retry.Error) {
return false, err
}
-/// getVirtualMachine calls 'VirtualMachinesClient.Get' with a timed cache
-/// The service side has throttling control that delays responses if there are multiple requests onto certain vm
-/// resource request in short period.
+// getVirtualMachine calls 'VirtualMachinesClient.Get' with a timed cache
+// The service side has throttling control that delays responses if there are multiple requests onto certain vm
+// resource request in short period.
func (az *Cloud) getVirtualMachine(nodeName types.NodeName, crt azcache.AzureCacheReadType) (vm compute.VirtualMachine, err error) {
vmName := string(nodeName)
cachedVM, err := az.vmCache.Get(vmName, crt)
@@ -86,7 +86,7 @@ func (az *Cloud) getRouteTable(crt azcache.AzureCacheReadType) (routeTable netwo
return routeTable, false, fmt.Errorf("Route table name is not configured")
}
- cachedRt, err := az.rtCache.Get(az.RouteTableName, crt)
+ cachedRt, err := az.rtCache.GetWithDeepCopy(az.RouteTableName, crt)
if err != nil {
return routeTable, false, err
}
@@ -109,7 +109,7 @@ func (az *Cloud) getPIPCacheKey(pipResourceGroup string, pipName string) string
func (az *Cloud) getPublicIPAddress(pipResourceGroup string, pipName string, crt azcache.AzureCacheReadType) (network.PublicIPAddress, bool, error) {
pip := network.PublicIPAddress{}
cacheKey := az.getPIPCacheKey(pipResourceGroup, pipName)
- cachedPIP, err := az.pipCache.Get(cacheKey, crt)
+ cachedPIP, err := az.pipCache.GetWithDeepCopy(cacheKey, crt)
if err != nil {
return pip, false, err
}
@@ -145,8 +145,8 @@ func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (networ
return subnet, exists, nil
}
-func (az *Cloud) getAzureLoadBalancer(name string, crt azcache.AzureCacheReadType) (lb network.LoadBalancer, exists bool, err error) {
- cachedLB, err := az.lbCache.Get(name, crt)
+func (az *Cloud) getAzureLoadBalancer(name string, crt azcache.AzureCacheReadType) (lb *network.LoadBalancer, exists bool, err error) {
+ cachedLB, err := az.lbCache.GetWithDeepCopy(name, crt)
if err != nil {
return lb, false, err
}
@@ -155,7 +155,7 @@ func (az *Cloud) getAzureLoadBalancer(name string, crt azcache.AzureCacheReadTyp
return lb, false, nil
}
- return *(cachedLB.(*network.LoadBalancer)), true, nil
+ return cachedLB.(*network.LoadBalancer), true, nil
}
func (az *Cloud) getSecurityGroup(crt azcache.AzureCacheReadType) (network.SecurityGroup, error) {
@@ -164,7 +164,7 @@ func (az *Cloud) getSecurityGroup(crt azcache.AzureCacheReadType) (network.Secur
return nsg, fmt.Errorf("securityGroupName is not configured")
}
- securityGroup, err := az.nsgCache.Get(az.SecurityGroupName, crt)
+ securityGroup, err := az.nsgCache.GetWithDeepCopy(az.SecurityGroupName, crt)
if err != nil {
return nsg, err
}
@@ -177,7 +177,7 @@ func (az *Cloud) getSecurityGroup(crt azcache.AzureCacheReadType) (network.Secur
}
func (az *Cloud) getPrivateLinkService(frontendIPConfigID *string, crt azcache.AzureCacheReadType) (pls network.PrivateLinkService, err error) {
- cachedPLS, err := az.plsCache.Get(*frontendIPConfigID, crt)
+ cachedPLS, err := az.plsCache.GetWithDeepCopy(*frontendIPConfigID, crt)
if err != nil {
return pls, err
}
@@ -405,8 +405,8 @@ func (az *Cloud) IsNodeUnmanagedByProviderID(providerID string) bool {
return !azureNodeProviderIDRE.Match([]byte(providerID))
}
-// convertResourceGroupNameToLower converts the resource group name in the resource ID to be lowered.
-func convertResourceGroupNameToLower(resourceID string) (string, error) {
+// ConvertResourceGroupNameToLower converts the resource group name in the resource ID to be lowered.
+func ConvertResourceGroupNameToLower(resourceID string) (string, error) {
matches := azureResourceGroupNameRE.FindStringSubmatch(resourceID)
if len(matches) != 2 {
return "", fmt.Errorf("%q isn't in Azure resource ID format %q", resourceID, azureResourceGroupNameRE.String())
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/auth/azure_auth.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/config/azure_auth.go
similarity index 87%
rename from cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/auth/azure_auth.go
rename to cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/config/azure_auth.go
index c641f5cd03e3..a7ba9ea9bfea 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/auth/azure_auth.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/config/azure_auth.go
@@ -14,15 +14,19 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package auth
+package config
import (
"crypto/rsa"
"crypto/x509"
+ "errors"
"fmt"
- "io/ioutil"
+ "io"
+ "os"
"strings"
+ "sigs.k8s.io/yaml"
+
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
@@ -38,6 +42,10 @@ var (
ErrorNoAuth = fmt.Errorf("no credentials provided for Azure cloud provider")
)
+const (
+ maxReadLength = 10 * 1 << 20 // 10MB
+)
+
// AzureAuthConfig holds auth related part of cloud config
type AzureAuthConfig struct {
// The cloud environment identifier. Takes values from https://github.com/Azure/go-autorest/blob/ec5f4903f77ed9927ac95b19ab8e44ada64c1356/autorest/azure/environments.go#L13
@@ -110,7 +118,7 @@ func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment, r
config.UserAssignedIdentityID)
}
- klog.V(4).Info("azure: User Assigned MSI ID is client ID. Resource ID parsing error: %+v", err)
+ klog.V(4).Info("azure: User Assigned MSI ID is client ID")
return adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint,
resource,
config.UserAssignedIdentityID)
@@ -137,7 +145,7 @@ func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment, r
if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 {
klog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token")
- certData, err := ioutil.ReadFile(config.AADClientCertPath)
+ certData, err := os.ReadFile(config.AADClientCertPath)
if err != nil {
return nil, fmt.Errorf("reading the client certificate from file %s: %w", config.AADClientCertPath, err)
}
@@ -248,12 +256,47 @@ func ParseAzureEnvironment(cloudName, resourceManagerEndpoint, identitySystem st
return &env, err
}
-// UsesNetworkResourceInDifferentTenantOrSubscription determines whether the AzureAuthConfig indicates to use network resources in different AAD Tenant and Subscription than those for the cluster
-// Return true when one of NetworkResourceTenantID and NetworkResourceSubscriptionID are specified
-// and equal to one defined in global configs
-func (config *AzureAuthConfig) UsesNetworkResourceInDifferentTenantOrSubscription() bool {
- return (len(config.NetworkResourceTenantID) > 0 && !strings.EqualFold(config.NetworkResourceTenantID, config.TenantID)) ||
- (len(config.NetworkResourceSubscriptionID) > 0 && !strings.EqualFold(config.NetworkResourceSubscriptionID, config.SubscriptionID))
+// ParseAzureAuthConfig returns a parsed configuration for an Azure cloudprovider config file
+func ParseAzureAuthConfig(configReader io.Reader) (*AzureAuthConfig, *azure.Environment, error) {
+ var config AzureAuthConfig
+
+ if configReader == nil {
+ return nil, nil, errors.New("nil config is provided")
+ }
+
+ limitedReader := &io.LimitedReader{R: configReader, N: maxReadLength}
+ configContents, err := io.ReadAll(limitedReader)
+ if err != nil {
+ return nil, nil, err
+ }
+ if limitedReader.N <= 0 {
+ return nil, nil, errors.New("the read limit is reached")
+ }
+ err = yaml.Unmarshal(configContents, &config)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ environment, err := ParseAzureEnvironment(config.Cloud, config.ResourceManagerEndpoint, config.IdentitySystem)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return &config, environment, nil
+}
+
+// UsesNetworkResourceInDifferentTenant determines whether the AzureAuthConfig indicates to use network resources in
+// different AAD Tenant than those for the cluster. Return true when NetworkResourceTenantID is specified and not equal
+// to one defined in global configs
+func (config *AzureAuthConfig) UsesNetworkResourceInDifferentTenant() bool {
+ return len(config.NetworkResourceTenantID) > 0 && !strings.EqualFold(config.NetworkResourceTenantID, config.TenantID)
+}
+
+// UsesNetworkResourceInDifferentSubscription determines whether the AzureAuthConfig indicates to use network resources
+// in different Subscription than those for the cluster. Return true when NetworkResourceSubscriptionID is specified
+// and not equal to one defined in global configs
+func (config *AzureAuthConfig) UsesNetworkResourceInDifferentSubscription() bool {
+ return len(config.NetworkResourceSubscriptionID) > 0 && !strings.EqualFold(config.NetworkResourceSubscriptionID, config.SubscriptionID)
}
// decodePkcs12 decodes a PKCS#12 client certificate by extracting the public certificate and
@@ -285,8 +328,8 @@ func azureStackOverrides(env *azure.Environment, resourceManagerEndpoint, identi
// checkConfigWhenNetworkResourceInDifferentTenant checks configuration for the scenario of using network resource in different tenant
func (config *AzureAuthConfig) checkConfigWhenNetworkResourceInDifferentTenant() error {
- if !config.UsesNetworkResourceInDifferentTenantOrSubscription() {
- return fmt.Errorf("NetworkResourceTenantID and NetworkResourceSubscriptionID must be configured")
+ if !config.UsesNetworkResourceInDifferentTenant() {
+ return fmt.Errorf("NetworkResourceTenantID must be configured")
}
if strings.EqualFold(config.IdentitySystem, consts.ADFSIdentitySystem) {
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_ratelimit.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/config/azure_ratelimit.go
similarity index 84%
rename from cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_ratelimit.go
rename to cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/config/azure_ratelimit.go
index b9f92064717d..9183142cdf33 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_ratelimit.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/config/azure_ratelimit.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package provider
+package config
import (
azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
@@ -22,8 +22,8 @@ import (
)
const (
- defaultAtachDetachDiskQPS = 6.0
- defaultAtachDetachDiskBucket = 10
+ DefaultAtachDetachDiskQPS = 6.0
+ DefaultAtachDetachDiskBucket = 10
)
// CloudProviderRateLimitConfig indicates the rate limit config for each clients.
@@ -47,6 +47,13 @@ type CloudProviderRateLimitConfig struct {
VirtualMachineSizeRateLimit *azclients.RateLimitConfig `json:"virtualMachineSizesRateLimit,omitempty" yaml:"virtualMachineSizesRateLimit,omitempty"`
AvailabilitySetRateLimit *azclients.RateLimitConfig `json:"availabilitySetRateLimit,omitempty" yaml:"availabilitySetRateLimit,omitempty"`
AttachDetachDiskRateLimit *azclients.RateLimitConfig `json:"attachDetachDiskRateLimit,omitempty" yaml:"attachDetachDiskRateLimit,omitempty"`
+ ContainerServiceRateLimit *azclients.RateLimitConfig `json:"containerServiceRateLimit,omitempty" yaml:"containerServiceRateLimit,omitempty"`
+ DeploymentRateLimit *azclients.RateLimitConfig `json:"deploymentRateLimit,omitempty" yaml:"deploymentRateLimit,omitempty"`
+ PrivateDNSRateLimit *azclients.RateLimitConfig `json:"privateDNSRateLimit,omitempty" yaml:"privateDNSRateLimit,omitempty"`
+ PrivateDNSZoneGroupRateLimit *azclients.RateLimitConfig `json:"privateDNSZoneGroupRateLimit,omitempty" yaml:"privateDNSZoneGroupRateLimit,omitempty"`
+ PrivateEndpointRateLimit *azclients.RateLimitConfig `json:"privateEndpointRateLimit,omitempty" yaml:"privateEndpointRateLimit,omitempty"`
+ PrivateLinkServiceRateLimit *azclients.RateLimitConfig `json:"privateLinkServiceRateLimit,omitempty" yaml:"privateLinkServiceRateLimit,omitempty"`
+ VirtualNetworkRateLimit *azclients.RateLimitConfig `json:"virtualNetworkRateLimit,omitempty" yaml:"virtualNetworkRateLimit,omitempty"`
}
// InitializeCloudProviderRateLimitConfig initializes rate limit configs.
@@ -87,8 +94,8 @@ func InitializeCloudProviderRateLimitConfig(config *CloudProviderRateLimitConfig
atachDetachDiskRateLimitConfig := azclients.RateLimitConfig{
CloudProviderRateLimit: true,
- CloudProviderRateLimitQPSWrite: defaultAtachDetachDiskQPS,
- CloudProviderRateLimitBucketWrite: defaultAtachDetachDiskBucket,
+ CloudProviderRateLimitQPSWrite: DefaultAtachDetachDiskQPS,
+ CloudProviderRateLimitBucketWrite: DefaultAtachDetachDiskBucket,
}
config.AttachDetachDiskRateLimit = overrideDefaultRateLimitConfig(&atachDetachDiskRateLimitConfig, config.AttachDetachDiskRateLimit)
}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/auth/doc.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/config/doc.go
similarity index 88%
rename from cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/auth/doc.go
rename to cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/config/doc.go
index 52e1d5da9aa1..3ad7a5daae32 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/auth/doc.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/config/doc.go
@@ -15,4 +15,4 @@ limitations under the License.
*/
// Package auth provides a general library to authorize Azure ARM clients.
-package auth // import "sigs.k8s.io/cloud-provider-azure/pkg/auth"
+package config // import "sigs.k8s.io/cloud-provider-azure/pkg/provider/config"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/virtualmachine/virtualmachine.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/virtualmachine/virtualmachine.go
index fc028ff790be..098bb8dad31b 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/virtualmachine/virtualmachine.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/virtualmachine/virtualmachine.go
@@ -17,7 +17,7 @@ limitations under the License.
package virtualmachine
import (
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
"github.com/Azure/go-autorest/autorest/to"
)
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_error.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_error.go
index 33e935591188..d5ddae531ddc 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_error.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_error.go
@@ -262,22 +262,35 @@ func getRetryAfter(resp *http.Response) time.Duration {
return dur
}
-// GetErrorWithRetriableHTTPStatusCodes gets an error with RetriableHTTPStatusCodes.
-// It is used to retry on some HTTPStatusCodes.
-func GetErrorWithRetriableHTTPStatusCodes(resp *http.Response, err error, retriableHTTPStatusCodes []int) *Error {
- rerr := GetError(resp, err)
+// IsInHTTPStatusCodeSet returns true when the status code falls in the given status code list.
+// It is used with doBackoffRetry to retry on some HTTPStatusCodes.
+func IsInHTTPStatusCodeSet(rerr *Error, httpStatusCodes []int) bool {
if rerr == nil {
- return nil
+ return false
}
-
- for _, code := range retriableHTTPStatusCodes {
+ for _, code := range httpStatusCodes {
if rerr.HTTPStatusCode == code {
- rerr.Retriable = true
- break
+ return true
}
}
- return rerr
+ return false
+}
+
+// isInErrorsSet returns true when the error message falls in the given error message set.
+// It is used with doBackoffRetry to retry on some errors.
+func isInErrorsSet(rerr *Error, errorMsgs []string) bool {
+
+ if rerr == nil {
+ return false
+ }
+
+ for _, err := range errorMsgs {
+ if strings.Contains(rerr.RawError.Error(), err) {
+ return true
+ }
+ }
+ return false
}
// GetStatusNotFoundAndForbiddenIgnoredError gets an error with StatusNotFound and StatusForbidden ignored.
@@ -339,7 +352,7 @@ func GetVMSSMetadataByRawError(err *Error) (string, string, error) {
reg := regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.*)/providers/Microsoft.Compute/virtualMachineScaleSets/(.+).`)
matches := reg.FindStringSubmatch(err.ServiceErrorMessage())
if len(matches) != 3 {
- return "", "", fmt.Errorf("GetVMSSMetadataByRawError: couldn't find a VMSS resource Id from error message %s", err.RawError)
+ return "", "", fmt.Errorf("GetVMSSMetadataByRawError: couldn't find a VMSS resource Id from error message %w", err.RawError)
}
return matches[1], matches[2], nil
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_retry.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_retry.go
index c2238b5e97e6..4093b6ba4993 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_retry.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_retry.go
@@ -164,18 +164,29 @@ func doBackoffRetry(s autorest.Sender, r *http.Request, backoff Backoff) (resp *
return
}
resp, err = s.Do(rr.Request())
- rerr := GetErrorWithRetriableHTTPStatusCodes(resp, err, backoff.RetriableHTTPStatusCodes)
+ rerr := GetError(resp, err)
// Abort retries in the following scenarios:
// 1) request succeed
- // 2) request is not retriable
- // 3) request has been throttled
- // 4) request contains non-retriable errors
- // 5) request has completed all the retry steps
+ // 2) request has been throttled
+ // 3) request is not retriable or contains non-retriable errors
+ // 4) request has completed all the retry steps
if rerr == nil {
return resp, nil
}
- if !rerr.Retriable || rerr.IsThrottled() || backoff.isNonRetriableError(rerr) || backoff.Steps == 1 {
+ if rerr.IsThrottled() {
+ return resp, rerr.RawError
+ }
+
+ if !rerr.Retriable {
+ if IsInHTTPStatusCodeSet(rerr, backoff.RetriableHTTPStatusCodes) || isInErrorsSet(rerr, backoff.NonRetriableErrors) {
+ rerr.Retriable = true
+ } else {
+ return resp, rerr.RawError
+ }
+ }
+
+ if backoff.Steps == 1 {
return resp, rerr.RawError
}
@@ -185,8 +196,11 @@ func doBackoffRetry(s autorest.Sender, r *http.Request, backoff Backoff) (resp *
}
return resp, rerr.RawError
}
-
- klog.V(3).Infof("Backoff retrying %s %q with error %v", r.Method, html.EscapeString(r.URL.String()), rerr)
+ drainErr := autorest.DrainResponseBody(resp)
+ if drainErr != nil {
+ klog.V(3).ErrorS(drainErr, "Failed to drain response body")
+ }
+ klog.V(3).Infof("Backoff retrying %s %q with error %v", r.Method, html.EscapeString(r.URL.String()), err)
}
return resp, err
@@ -203,3 +217,17 @@ func delayForBackOff(backoff *Backoff, cancel <-chan struct{}) bool {
return false
}
}
+
+// DoFilterOutNonRetriableError decorator works with autorest.DoRetryForAttempts
+func DoFilterOutNonRetriableError(shouldRetry func(rerr *Error) bool) autorest.SendDecorator {
+ return func(s autorest.Sender) autorest.Sender {
+ return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
+ resp, err := s.Do(r)
+ rerr := GetError(resp, err)
+ if rerr != nil && shouldRetry != nil && shouldRetry(rerr) {
+ return resp, rerr.RawError
+ }
+ return resp, nil
+ })
+ }
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/deepcopy/deepcopy.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/deepcopy/deepcopy.go
new file mode 100644
index 000000000000..4282ad14ab96
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/deepcopy/deepcopy.go
@@ -0,0 +1,139 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package deepcopy
+
+import (
+ "reflect"
+ "sync"
+)
+
+type deepCopyInterface interface {
+ DeepCopy() interface{}
+}
+
+// Copy returns a deep copy of src.
+func Copy(src interface{}) interface{} {
+ if src == nil {
+ return nil
+ }
+
+ if fromSyncMap, ok := src.(*sync.Map); ok {
+ to := copySyncMap(fromSyncMap)
+ return to
+ }
+
+ return copyNormal(src)
+}
+
+// copySyncMap deep copies a sync.Map, recursing into values that are themselves sync.Maps.
+// Targets are vmssVMCache, vmssFlexVMCache, etc.
+func copySyncMap(from *sync.Map) *sync.Map {
+ to := &sync.Map{}
+
+ from.Range(func(k, v interface{}) bool {
+ vm, ok := v.(*sync.Map)
+ if ok {
+ to.Store(k, copySyncMap(vm))
+ } else {
+ to.Store(k, copyNormal(v))
+ }
+ return true
+ })
+
+ return to
+}
+
+func copyNormal(src interface{}) interface{} {
+ if src == nil {
+ return nil
+ }
+
+ from := reflect.ValueOf(src)
+
+ to := reflect.New(from.Type()).Elem()
+
+ copy(from, to)
+
+ return to.Interface()
+}
+
+func copy(from, to reflect.Value) {
+ // Check if DeepCopy() is already implemented for the interface
+ if from.CanInterface() {
+ if deepcopy, ok := from.Interface().(deepCopyInterface); ok {
+ to.Set(reflect.ValueOf(deepcopy.DeepCopy()))
+ return
+ }
+ }
+
+ switch from.Kind() {
+ case reflect.Pointer:
+ fromValue := from.Elem()
+ if !fromValue.IsValid() {
+ return
+ }
+
+ to.Set(reflect.New(fromValue.Type()))
+ copy(fromValue, to.Elem())
+
+ case reflect.Interface:
+ if from.IsNil() {
+ return
+ }
+
+ fromValue := from.Elem()
+ toValue := reflect.New(fromValue.Type()).Elem()
+ copy(fromValue, toValue)
+ to.Set(toValue)
+
+ case reflect.Struct:
+ for i := 0; i < from.NumField(); i++ {
+ if from.Type().Field(i).PkgPath != "" {
+ // It is an unexported field.
+ continue
+ }
+ copy(from.Field(i), to.Field(i))
+ }
+
+ case reflect.Slice:
+ if from.IsNil() {
+ return
+ }
+
+ to.Set(reflect.MakeSlice(from.Type(), from.Len(), from.Cap()))
+ for i := 0; i < from.Len(); i++ {
+ copy(from.Index(i), to.Index(i))
+ }
+
+ case reflect.Map:
+ if from.IsNil() {
+ return
+ }
+
+ to.Set(reflect.MakeMap(from.Type()))
+ for _, key := range from.MapKeys() {
+ fromValue := from.MapIndex(key)
+ toValue := reflect.New(fromValue.Type()).Elem()
+ copy(fromValue, toValue)
+ copiedKey := Copy(key.Interface())
+ to.SetMapIndex(reflect.ValueOf(copiedKey), toValue)
+ }
+
+ default:
+ to.Set(from)
+ }
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/version/base.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/version/base.go
index 730e79f03d14..06ccff0341bd 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/version/base.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/version/base.go
@@ -55,7 +55,7 @@ var (
// NOTE: The $Format strings are replaced during 'git archive' thanks to the
// companion .gitattributes file containing 'export-subst' in this same
// directory. See also https://git-scm.com/docs/gitattributes
- gitVersion = "v0.0.0-master+$Format:%h$"
+ gitVersion = "v0.0.0-master+$Format:%H$"
gitCommit = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD)
gitTreeState = "" // state of git tree, either "clean" or "dirty"