diff --git a/go.mod b/go.mod index 2fc1fae38..861d41b4c 100644 --- a/go.mod +++ b/go.mod @@ -19,12 +19,13 @@ require ( github.com/mdomke/git-semver v1.0.0 github.com/onsi/ginkgo/v2 v2.19.1 github.com/onsi/gomega v1.34.0 - github.com/openshift/api v0.0.0-20240422085825-2624175e9673 + github.com/openshift/api v0.0.0-20240524162738-d899f8877d22 github.com/openshift/cluster-node-tuning-operator v0.0.0-20240611064827-2bd8891ead93 + github.com/openshift/hypershift/api v0.0.0-20241115183703-d41904871380 github.com/openshift/machine-config-operator v0.0.1-0.20230724174830-7b54f1dcce4e github.com/sergi/go-diff v1.1.0 github.com/stretchr/testify v1.9.0 - golang.org/x/sync v0.7.0 + golang.org/x/sync v0.8.0 k8s.io/api v0.31.2 k8s.io/apiextensions-apiserver v0.31.2 k8s.io/apimachinery v0.31.2 @@ -97,11 +98,11 @@ require ( go.uber.org/ratelimit v0.2.0 // indirect golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect golang.org/x/mod v0.17.0 // indirect - golang.org/x/net v0.26.0 // indirect + golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/sys v0.21.0 // indirect - golang.org/x/term v0.21.0 // indirect - golang.org/x/text v0.16.0 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/term v0.25.0 // indirect + golang.org/x/text v0.19.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect diff --git a/go.sum b/go.sum index 1f597af85..b69252edb 100644 --- a/go.sum +++ b/go.sum @@ -1952,14 +1952,16 @@ github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxj github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= github.com/onsi/gomega v1.34.0 h1:eSSPsPNp6ZpsG8X1OVmOTxig+CblTc4AxpPBykhe2Os= github.com/onsi/gomega v1.34.0/go.mod h1:MIKI8c+f+QLWk+hxbePD4i0LMJSExPaZOVfkoex4cAo= -github.com/openshift/api v0.0.0-20240422085825-2624175e9673 h1:D4qblu6z2A92fh7u9Nt1YskDtu+GySKiYP/D3tMWQ6A= -github.com/openshift/api v0.0.0-20240422085825-2624175e9673/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4= +github.com/openshift/api v0.0.0-20240524162738-d899f8877d22 h1:AW8KUN4k7qR2egrCCe3x95URHQ3N188+a/b0qpRyAHg= +github.com/openshift/api v0.0.0-20240524162738-d899f8877d22/go.mod h1:7Hm1kLJGxWT6eysOpD2zUztdn+w91eiERn6KtI5o9aw= github.com/openshift/client-go v0.0.0-20240415214935-be70f772f157 h1:xbd4qHpyFnnOYDHHnMQa+100MR+K/DFiC1J3BFdL+tQ= github.com/openshift/client-go v0.0.0-20240415214935-be70f772f157/go.mod h1:Q3mt/X5xrxnR5R6BE7duF2ToLioRQJYnTYaaDS4QZTs= github.com/openshift/cluster-node-tuning-operator v0.0.0-20240611064827-2bd8891ead93 h1:w39rlA/dNNolsHBtORZPaNW4L6qdk6a2LCzfpxtVYKM= github.com/openshift/cluster-node-tuning-operator v0.0.0-20240611064827-2bd8891ead93/go.mod h1:1nJC0uQ+XquLauGTdalV9jkYgGrTBGiq1cOgmDyvQz8= github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 h1:cHyxR+Y8rAMT6m1jQCaYGRwikqahI0OjjUDhFNf3ySQ= github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA= +github.com/openshift/hypershift/api v0.0.0-20241115183703-d41904871380 h1:rvDZDAURpDOO/+pf3cR6t+0J8ZT5ueP07LTW47PLy4s= +github.com/openshift/hypershift/api v0.0.0-20241115183703-d41904871380/go.mod h1:NIT2Bs83re4seKsT3Xp+ENOOCN2Gl++mguuGGhNnN/8= github.com/openshift/machine-config-operator v0.0.1-0.20230724174830-7b54f1dcce4e h1:lgQ2Iy0NHk/iBaR9yvqSqROCiOovpkbc8MRYSIpHf+M= 
github.com/openshift/machine-config-operator v0.0.1-0.20230724174830-7b54f1dcce4e/go.mod h1:xwAAOZMhwF91Ii8/yfOwp+yH5+GJS7VX90NI39RmSyo= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -2408,8 +2410,9 @@ golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2473,8 +2476,9 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2595,8 +2599,9 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240208230135-b75ee8823808/go.mod h1:KG1lNk5ZFNssSZLrpVb4sMXKMpGwGXOxSG3rnu2gZQQ= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -2621,8 +2626,9 @@ golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.21.0 
h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
+golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
+golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -2645,8 +2651,9 @@ golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
+golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
+golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
diff --git a/internal/nodepools/nodepools.go b/internal/nodepools/nodepools.go
new file mode 100644
index 000000000..8e29eff81
--- /dev/null
+++ b/internal/nodepools/nodepools.go
@@ -0,0 +1,89 @@
+package nodepools
+
+import (
+	"context"
+	"fmt"
+
+	corev1 "k8s.io/api/core/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/openshift-kni/numaresources-operator/test/utils/hypershift"
+	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+)
+
+func GetByClusterName(ctx context.Context, c client.Client, hostedClusterName string) (*hypershiftv1beta1.NodePool, error) {
+	npList := &hypershiftv1beta1.NodePoolList{}
+	if err := c.List(ctx, npList); err != nil {
+		return nil, err
+	}
+	var np *hypershiftv1beta1.NodePool
+	for i := 0; i < len(npList.Items); i++ {
+		if npList.Items[i].Spec.ClusterName == hostedClusterName {
+			np = &npList.Items[i]
+			break
+		}
+	}
+	if np == nil {
+		return nil, fmt.Errorf("failed to find nodePool associated with cluster %q; existing nodePools are: %+v", hostedClusterName, npList.Items)
+	}
+	return np, nil
+}
+
+// AttachConfigObject attaches a tuning config object to the NodePool associated with the hosted cluster.
+// The function is idempotent.
+func AttachConfigObject(ctx context.Context, cli client.Client, object client.Object) error {
+	hostedClusterName, err := hypershift.GetHostedClusterName()
+	if err != nil {
+		return err
+	}
+	np, err := GetByClusterName(ctx, cli, hostedClusterName)
+	if err != nil {
+		return err
+	}
+	np.Spec.Config = addObjectRef(object, np.Spec.Config)
+	if err := cli.Update(ctx, np); err != nil {
+		return err
+	}
+	return nil
+}
+
+// addObjectRef returns refs with a reference to object placed first,
+// replacing any existing reference that carries the same name.
+func addObjectRef(object client.Object, refs []corev1.LocalObjectReference) []corev1.LocalObjectReference {
+	updatedConfig := []corev1.LocalObjectReference{{Name: object.GetName()}}
+	for i := range refs {
+		if refs[i].Name != object.GetName() {
+			updatedConfig = append(updatedConfig, refs[i])
+		}
+	}
+	return updatedConfig
+}
+
+// removeObjectRef returns refs without any reference that matches the name of object.
+func removeObjectRef(object client.Object, refs []corev1.LocalObjectReference) []corev1.LocalObjectReference {
+	var updatedConfig []corev1.LocalObjectReference
+	for i := range refs {
+		if refs[i].Name != object.GetName() {
+			updatedConfig = append(updatedConfig, refs[i])
+		}
+	}
+	return updatedConfig
+}
+
+// DeAttachConfigObject removes the reference to the given config object from the
+// NodePool associated with the hosted cluster. The function is idempotent.
+func DeAttachConfigObject(ctx context.Context, cli client.Client, object client.Object) error {
+	hostedClusterName, err := hypershift.GetHostedClusterName()
+	if err != nil {
+		return err
+	}
+	np, err := GetByClusterName(ctx, cli, hostedClusterName)
+	if err != nil {
+		return err
+	}
+	np.Spec.Config = removeObjectRef(object, np.Spec.Config)
+	if err := cli.Update(ctx, np); err != nil {
+		return err
+	}
+	return nil
+}
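Taken together, AttachConfigObject and DeAttachConfigObject give the e2e suites a small API for rolling tuning objects through the NodePool of a hosted cluster. Below is a minimal, hedged sketch of how a caller might wire them to the internal/wait helpers introduced later in this diff; rollOutTuningConfig is a hypothetical helper, and it assumes clients.MNGClient has been initialized as in test/utils/clients:

	// Assumed imports, all from this repository:
	//   nodepools  "github.com/openshift-kni/numaresources-operator/internal/nodepools"
	//   wait       "github.com/openshift-kni/numaresources-operator/internal/wait"
	//   clients    "github.com/openshift-kni/numaresources-operator/test/utils/clients"
	//   hypershift "github.com/openshift-kni/numaresources-operator/test/utils/hypershift"

	// rollOutTuningConfig attaches cm to the NodePool backing the hosted
	// cluster and blocks until the resulting config rollout settles.
	func rollOutTuningConfig(ctx context.Context, cm *corev1.ConfigMap) error {
		if err := nodepools.AttachConfigObject(ctx, clients.MNGClient, cm); err != nil {
			return err
		}
		clusterName, err := hypershift.GetHostedClusterName()
		if err != nil {
			return err
		}
		np, err := nodepools.GetByClusterName(ctx, clients.MNGClient, clusterName)
		if err != nil {
			return err
		}
		// Wait for the rollout to start before waiting for it to settle;
		// otherwise an UpdatingConfig condition that is still False from a
		// previous rollout could be mistaken for completion.
		if err := wait.ForUpdatingConfig(ctx, clients.MNGClient, np.Name, np.Namespace); err != nil {
			return err
		}
		return wait.ForConfigToBeReady(ctx, clients.MNGClient, np.Name, np.Namespace)
	}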
diff --git a/internal/nodepools/nodepools_test.go b/internal/nodepools/nodepools_test.go
new file mode 100644
index 000000000..b9101c4ee
--- /dev/null
+++ b/internal/nodepools/nodepools_test.go
@@ -0,0 +1,200 @@
+package nodepools
+
+import (
+	"context"
+	"strings"
+	"testing"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	k8sruntime "k8s.io/apimachinery/pkg/runtime"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+)
+
+var (
+	scheme = k8sruntime.NewScheme()
+)
+
+func TestGetByClusterName(t *testing.T) {
+	utilruntime.Must(hypershiftv1beta1.AddToScheme(scheme))
+
+	ctx := context.TODO()
+
+	// Mock data for NodePoolList
+	nodePools := []hypershiftv1beta1.NodePool{
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "nodepool1",
+				Namespace: "default",
+			},
+			Spec: hypershiftv1beta1.NodePoolSpec{
+				ClusterName: "cluster1",
+			},
+		},
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "nodepool2",
+				Namespace: "default",
+			},
+			Spec: hypershiftv1beta1.NodePoolSpec{
+				ClusterName: "cluster2",
+			},
+		},
+	}
+
+	testCases := []struct {
+		name              string
+		clusterName       string
+		expectedError     string
+		expectedObjectKey string
+	}{
+		{
+			name:              "Successfully finds NodePool by cluster name",
+			clusterName:       "cluster1",
+			expectedError:     "",
+			expectedObjectKey: client.ObjectKeyFromObject(&nodePools[0]).String(),
+		},
+		{
+			name:              "Returns error when no NodePool matches cluster name",
+			clusterName:       "nonexistent-cluster",
+			expectedError:     "failed to find nodePool associated with cluster \"nonexistent-cluster\"; existing nodePools are:",
+			expectedObjectKey: "",
+		},
+	}
+
+	// Run testCases
+	t.Run("GetByClusterName Tests", func(t *testing.T) {
+		// Initialize a fake client with the mock data
+		fakeClient := fake.NewClientBuilder().WithObjects(&nodePools[0], &nodePools[1]).WithScheme(scheme).Build()
+		for _, tc := range testCases {
+			t.Run(tc.name, func(t *testing.T) {
+				np, err := GetByClusterName(ctx, fakeClient, tc.clusterName)
+				if err != nil {
+					if tc.expectedError == "" {
+						t.Errorf("no error expected to be returned; got %v", err)
+					}
+					if !strings.Contains(err.Error(), tc.expectedError) {
+						t.Errorf("expected error %v, got %v", tc.expectedError, err)
+					}
+				} else {
+					if tc.expectedObjectKey != client.ObjectKeyFromObject(np).String() {
+						t.Errorf("expected object key %v, got %v", tc.expectedObjectKey, client.ObjectKeyFromObject(np).String())
+					}
+				}
+			})
+		}
+	})
+}
+
+func TestAddObjectRef(t *testing.T) {
+	// Mock object
+	mockObject := &corev1.ConfigMap{}
+	mockObject.SetName("object1")
+
+	testCases := []struct {
+		name          string
+		initialConfig []corev1.LocalObjectReference
+		expected      []corev1.LocalObjectReference
+	}{
+		{
+			name: "Add new object reference",
+			initialConfig: []corev1.LocalObjectReference{
+				{Name: "object2"},
+				{Name: "object3"},
+			},
+			expected: []corev1.LocalObjectReference{
+				{Name: "object1"},
+				{Name: "object2"},
+				{Name: "object3"},
+			},
+		},
+		{
+			name: "Replace existing object reference",
+			initialConfig: []corev1.LocalObjectReference{
+				{Name: "object1"},
+				{Name: "object3"},
+			},
+			expected: []corev1.LocalObjectReference{
+				{Name: "object1"},
+				{Name: "object3"},
+			},
+		},
+		{
+			name:     "Add to empty configuration",
+			expected: []corev1.LocalObjectReference{{Name: "object1"}},
+		},
+	}
+
+	// Run test cases
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			result := addObjectRef(mockObject, tc.initialConfig)
+			if len(result) != len(tc.expected) {
+				t.Errorf("Test %q failed: expected length %d, got %d", tc.name, len(tc.expected), len(result))
+			}
+			for i := range result {
+				if result[i].Name != tc.expected[i].Name {
+					t.Errorf("Test %q failed: expected %v, got %v", tc.name, tc.expected[i], result[i])
+				}
+			}
+		})
+	}
+}
+
+func TestRemoveObjectRef(t *testing.T) {
+	// Mock object
+	mockObject := &corev1.ConfigMap{}
+	mockObject.SetName("object1")
+
+	testCases := []struct {
+		name          string
+		initialConfig []corev1.LocalObjectReference
+		expected      []corev1.LocalObjectReference
+	}{
+		{
+			name: "Remove existing object reference",
+			initialConfig: []corev1.LocalObjectReference{
+				{Name: "object1"},
+				{Name: "object2"},
+			},
+			expected: []corev1.LocalObjectReference{
+				{Name: "object2"},
+			},
+		},
+		{
+			name: "Do nothing if object reference does not exist",
+			initialConfig: []corev1.LocalObjectReference{
+				{Name: "object2"},
+				{Name: "object3"},
+			},
+			expected: []corev1.LocalObjectReference{
+				{Name: "object2"},
+				{Name: "object3"},
+			},
+		},
+		{
+			name:     "Handle empty configuration",
+			expected: []corev1.LocalObjectReference{},
+		},
+	}
+
+	// Run test cases
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			result := removeObjectRef(mockObject, tc.initialConfig)
+			if len(result) != len(tc.expected) {
+				t.Errorf("Test %q failed: expected length %d, got %d", tc.name, len(tc.expected), len(result))
+			}
+			for i := range result {
+				if result[i].Name != tc.expected[i].Name {
+					t.Errorf("Test %q failed: expected %v, got %v", tc.name, tc.expected[i], result[i])
+				}
+			}
+		})
+	}
+}
diff --git a/internal/wait/nodepool.go b/internal/wait/nodepool.go
new file mode 100644
index 000000000..d64d0b46b
--- /dev/null
+++ b/internal/wait/nodepool.go
@@ -0,0 +1,52 @@
+package wait
+
+import (
+	"context"
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+)
+
+// ForUpdatingConfig blocks until the NodePool reports the UpdatingConfig
+// condition with status True, i.e. a config rollout has started.
+func ForUpdatingConfig(ctx context.Context, c client.Client, npName, namespace string) error {
+	return waitForCondition(ctx, c, npName, namespace, func(conds []hypershiftv1beta1.NodePoolCondition) bool {
+		for _, cond := range conds {
+			if cond.Type == hypershiftv1beta1.NodePoolUpdatingConfigConditionType {
+				return cond.Status == corev1.ConditionTrue
+			}
+		}
+		return false
+	})
+}
+
+// ForConfigToBeReady blocks until the NodePool reports the UpdatingConfig
+// condition with status False, i.e. the config rollout has completed.
+func ForConfigToBeReady(ctx context.Context, c client.Client, npName, namespace string) error {
+	return waitForCondition(ctx, c, npName, namespace, func(conds []hypershiftv1beta1.NodePoolCondition) bool {
+		for _, cond := range conds {
+			if cond.Type == hypershiftv1beta1.NodePoolUpdatingConfigConditionType {
+				return cond.Status == corev1.ConditionFalse
+			}
+		}
+		return false
+	})
+}
+
+// waitForCondition polls the NodePool every 10 seconds, for up to 60 minutes,
+// until conditionFunc reports true for its status conditions.
+func waitForCondition(ctx context.Context, c client.Client, npName, namespace string, conditionFunc func([]hypershiftv1beta1.NodePoolCondition) bool) error {
+	return wait.PollUntilContextTimeout(ctx, time.Second*10, time.Minute*60, false, func(ctx context.Context) (done bool, err error) {
+		np := &hypershiftv1beta1.NodePool{}
+		key := client.ObjectKey{Name: npName, Namespace: namespace}
+		err = c.Get(ctx, key, np)
+		if err != nil {
+			return false, err
+		}
+		return conditionFunc(np.Status.Conditions), nil
+	})
+}
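The two exported waiters poll every 10 seconds with a 60-minute ceiling (see waitForCondition above). A caller that needs a tighter bound can shrink it through the context, since PollUntilContextTimeout honors the earlier of the context deadline and its own timeout. A small sketch with illustrative names (mgmtClient, npName, npNamespace are placeholders the caller supplies):

	// Bound the wait to 10 minutes instead of the helper's 60-minute ceiling.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
	defer cancel()
	if err := wait.ForConfigToBeReady(ctx, mgmtClient, npName, npNamespace); err != nil {
		// A non-nil err here means the rollout did not settle within 10
		// minutes, or the NodePool could not be fetched.
	}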
diff --git a/test/utils/clients/clients.go b/test/utils/clients/clients.go
index f735e0b50..cea16ca4c 100644
--- a/test/utils/clients/clients.go
+++ b/test/utils/clients/clients.go
@@ -30,6 +30,7 @@ import (
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 
 	nropv1 "github.com/openshift-kni/numaresources-operator/api/numaresourcesoperator/v1"
+	"github.com/openshift-kni/numaresources-operator/test/utils/hypershift"
 )
 
 var (
@@ -37,6 +38,9 @@ var (
 	Client client.Client
 	// K8sClient defines k8s client to run subresource operations, for example you should use it to get pod logs
 	K8sClient *kubernetes.Clientset
+	// MNGClient defines the API client used to run CRUD operations against the
+	// HyperShift management cluster during testing
+	MNGClient client.Client
 	// ClientsEnabled tells if the client from the package can be used
 	ClientsEnabled bool
 )
@@ -75,6 +79,13 @@ func init() {
 		ClientsEnabled = false
 		return
 	}
+	if hypershift.IsHypershiftCluster() {
+		MNGClient, err = hypershift.BuildControlPlaneClient()
+		if err != nil {
+			ClientsEnabled = false
+			return
+		}
+	}
 	ClientsEnabled = true
 }
 
diff --git a/test/utils/hypershift/hypershift.go b/test/utils/hypershift/hypershift.go
new file mode 100644
index 000000000..1563492c2
--- /dev/null
+++ b/test/utils/hypershift/hypershift.go
@@ -0,0 +1,79 @@
+package hypershift
+
+import (
+	"fmt"
+	"os"
+
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/klog/v2"
+
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+var isHypershiftCluster bool
+
+func init() {
+	if v, ok := os.LookupEnv("CLUSTER_TYPE"); ok && v == "hypershift" {
+		klog.Infof("hypershift cluster detected")
+		isHypershiftCluster = true
+	}
+}
+
+const (
+	ManagementClusterKubeConfigEnv = "HYPERSHIFT_MANAGEMENT_CLUSTER_KUBECONFIG"
+	HostedControlPlaneNamespaceEnv = "HYPERSHIFT_HOSTED_CONTROL_PLANE_NAMESPACE"
+	HostedClusterNameEnv           = "CLUSTER_NAME"
+)
+
+func BuildControlPlaneClient() (client.Client, error) {
+	kcPath, ok := os.LookupEnv(ManagementClusterKubeConfigEnv)
+	if !ok {
+		return nil, fmt.Errorf("failed to build management-cluster client for hypershift, environment variable %q is not defined", ManagementClusterKubeConfigEnv)
+	}
+	c, err := buildClient(kcPath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to build management-cluster client for hypershift; err %v", err)
+	}
+	return c, nil
+}
+
+func GetHostedClusterName() (string, error) {
+	v, ok := os.LookupEnv(HostedClusterNameEnv)
+	if !ok {
+		return "", fmt.Errorf("failed to retrieve hosted cluster name; %q environment var is not set", HostedClusterNameEnv)
+	}
+	return v, nil
+}
+
+func GetManagementClusterNamespace() (string, error) {
+	ns, ok := os.LookupEnv(HostedControlPlaneNamespaceEnv)
+	if !ok {
+		return "", fmt.Errorf("failed to retrieve management cluster namespace; %q environment var is not set", HostedControlPlaneNamespaceEnv)
+	}
+	return ns, nil
+}
+
+// IsHypershiftCluster should be used only in CI environments
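+//
+// A minimal usage sketch (illustrative only; it assumes the CI harness exports
+// CLUSTER_TYPE=hypershift and HYPERSHIFT_MANAGEMENT_CLUSTER_KUBECONFIG):
+//
+//	if hypershift.IsHypershiftCluster() {
+//		mgmtClient, err := hypershift.BuildControlPlaneClient()
+//		// handle err, then use mgmtClient against the management cluster
+//	}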
+func IsHypershiftCluster() bool { + return isHypershiftCluster +} + +func buildClient(kubeConfigPath string) (client.Client, error) { + restConfig, err := clientcmd.BuildConfigFromFlags("", kubeConfigPath) + if err != nil { + return nil, err + } + c, err := client.New(restConfig, client.Options{}) + if err != nil { + return nil, err + } + return c, nil +} diff --git a/vendor/github.com/openshift/api/config/v1/feature_gates.go b/vendor/github.com/openshift/api/config/v1/feature_gates.go deleted file mode 100644 index 737c4e322..000000000 --- a/vendor/github.com/openshift/api/config/v1/feature_gates.go +++ /dev/null @@ -1,598 +0,0 @@ -package v1 - -import "fmt" - -// FeatureGateDescription is a golang-only interface used to contains details for a feature gate. -type FeatureGateDescription struct { - // FeatureGateAttributes is the information that appears in the API - FeatureGateAttributes FeatureGateAttributes - - // OwningJiraComponent is the jira component that owns most of the impl and first assignment for the bug. - // This is the team that owns the feature long term. - OwningJiraComponent string - // ResponsiblePerson is the person who is on the hook for first contact. This is often, but not always, a team lead. - // It is someone who can make the promise on the behalf of the team. - ResponsiblePerson string - // OwningProduct is the product that owns the lifecycle of the gate. - OwningProduct OwningProduct -} - -type ClusterProfileName string - -var ( - Hypershift = ClusterProfileName("include.release.openshift.io/ibm-cloud-managed") - SelfManaged = ClusterProfileName("include.release.openshift.io/self-managed-high-availability") - AllClusterProfiles = []ClusterProfileName{Hypershift, SelfManaged} -) - -type OwningProduct string - -var ( - ocpSpecific = OwningProduct("OCP") - kubernetes = OwningProduct("Kubernetes") -) - -type featureGateBuilder struct { - name string - owningJiraComponent string - responsiblePerson string - owningProduct OwningProduct - - statusByClusterProfileByFeatureSet map[ClusterProfileName]map[FeatureSet]bool -} - -// newFeatureGate featuregate are disabled in every FeatureSet and selectively enabled -func newFeatureGate(name string) *featureGateBuilder { - b := &featureGateBuilder{ - name: name, - statusByClusterProfileByFeatureSet: map[ClusterProfileName]map[FeatureSet]bool{}, - } - for _, clusterProfile := range AllClusterProfiles { - byFeatureSet := map[FeatureSet]bool{} - for _, featureSet := range AllFixedFeatureSets { - byFeatureSet[featureSet] = false - } - b.statusByClusterProfileByFeatureSet[clusterProfile] = byFeatureSet - } - return b -} - -func (b *featureGateBuilder) reportProblemsToJiraComponent(owningJiraComponent string) *featureGateBuilder { - b.owningJiraComponent = owningJiraComponent - return b -} - -func (b *featureGateBuilder) contactPerson(responsiblePerson string) *featureGateBuilder { - b.responsiblePerson = responsiblePerson - return b -} - -func (b *featureGateBuilder) productScope(owningProduct OwningProduct) *featureGateBuilder { - b.owningProduct = owningProduct - return b -} - -func (b *featureGateBuilder) enableIn(featureSets ...FeatureSet) *featureGateBuilder { - for clusterProfile := range b.statusByClusterProfileByFeatureSet { - for _, featureSet := range featureSets { - b.statusByClusterProfileByFeatureSet[clusterProfile][featureSet] = true - } - } - return b -} - -func (b *featureGateBuilder) enableForClusterProfile(clusterProfile ClusterProfileName, featureSets ...FeatureSet) *featureGateBuilder { - for _, featureSet := 
range featureSets { - b.statusByClusterProfileByFeatureSet[clusterProfile][featureSet] = true - } - return b -} - -func (b *featureGateBuilder) register() (FeatureGateName, error) { - if len(b.name) == 0 { - return "", fmt.Errorf("missing name") - } - if len(b.owningJiraComponent) == 0 { - return "", fmt.Errorf("missing owningJiraComponent") - } - if len(b.responsiblePerson) == 0 { - return "", fmt.Errorf("missing responsiblePerson") - } - if len(b.owningProduct) == 0 { - return "", fmt.Errorf("missing owningProduct") - } - - featureGateName := FeatureGateName(b.name) - description := FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: featureGateName, - }, - OwningJiraComponent: b.owningJiraComponent, - ResponsiblePerson: b.responsiblePerson, - OwningProduct: b.owningProduct, - } - - // statusByClusterProfileByFeatureSet is initialized by constructor to be false for every combination - for clusterProfile, byFeatureSet := range b.statusByClusterProfileByFeatureSet { - for featureSet, enabled := range byFeatureSet { - if _, ok := allFeatureGates[clusterProfile]; !ok { - allFeatureGates[clusterProfile] = map[FeatureSet]*FeatureGateEnabledDisabled{} - } - if _, ok := allFeatureGates[clusterProfile][featureSet]; !ok { - allFeatureGates[clusterProfile][featureSet] = &FeatureGateEnabledDisabled{} - } - - if enabled { - allFeatureGates[clusterProfile][featureSet].Enabled = append(allFeatureGates[clusterProfile][featureSet].Enabled, description) - } else { - allFeatureGates[clusterProfile][featureSet].Disabled = append(allFeatureGates[clusterProfile][featureSet].Disabled, description) - } - } - } - - return featureGateName, nil -} - -func (b *featureGateBuilder) mustRegister() FeatureGateName { - ret, err := b.register() - if err != nil { - panic(err) - } - return ret -} - -func FeatureSets(clusterProfile ClusterProfileName, featureSet FeatureSet) (*FeatureGateEnabledDisabled, error) { - byFeatureSet, ok := allFeatureGates[clusterProfile] - if !ok { - return nil, fmt.Errorf("no information found for ClusterProfile=%q", clusterProfile) - } - featureGates, ok := byFeatureSet[featureSet] - if !ok { - return nil, fmt.Errorf("no information found for FeatureSet=%q under ClusterProfile=%q", featureSet, clusterProfile) - } - return featureGates.DeepCopy(), nil -} - -func AllFeatureSets() map[ClusterProfileName]map[FeatureSet]*FeatureGateEnabledDisabled { - ret := map[ClusterProfileName]map[FeatureSet]*FeatureGateEnabledDisabled{} - - for clusterProfile, byFeatureSet := range allFeatureGates { - newByFeatureSet := map[FeatureSet]*FeatureGateEnabledDisabled{} - - for featureSet, enabledDisabled := range byFeatureSet { - newByFeatureSet[featureSet] = enabledDisabled.DeepCopy() - } - ret[clusterProfile] = newByFeatureSet - } - - return ret -} - -var ( - allFeatureGates = map[ClusterProfileName]map[FeatureSet]*FeatureGateEnabledDisabled{} - - FeatureGateServiceAccountTokenNodeBindingValidation = newFeatureGate("ServiceAccountTokenNodeBindingValidation"). - reportProblemsToJiraComponent("apiserver-auth"). - contactPerson("stlaz"). - productScope(kubernetes). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateServiceAccountTokenNodeBinding = newFeatureGate("ServiceAccountTokenNodeBinding"). - reportProblemsToJiraComponent("apiserver-auth"). - contactPerson("stlaz"). - productScope(kubernetes). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). 
- mustRegister() - - FeatureGateServiceAccountTokenPodNodeInfo = newFeatureGate("ServiceAccountTokenPodNodeInfo"). - reportProblemsToJiraComponent("apiserver-auth"). - contactPerson("stlaz"). - productScope(kubernetes). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateValidatingAdmissionPolicy = newFeatureGate("ValidatingAdmissionPolicy"). - reportProblemsToJiraComponent("kube-apiserver"). - contactPerson("benluddy"). - productScope(kubernetes). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateGatewayAPI = newFeatureGate("GatewayAPI"). - reportProblemsToJiraComponent("Routing"). - contactPerson("miciah"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateOpenShiftPodSecurityAdmission = newFeatureGate("OpenShiftPodSecurityAdmission"). - reportProblemsToJiraComponent("auth"). - contactPerson("stlaz"). - productScope(ocpSpecific). - enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateExternalCloudProvider = newFeatureGate("ExternalCloudProvider"). - reportProblemsToJiraComponent("cloud-provider"). - contactPerson("jspeed"). - productScope(ocpSpecific). - enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateExternalCloudProviderAzure = newFeatureGate("ExternalCloudProviderAzure"). - reportProblemsToJiraComponent("cloud-provider"). - contactPerson("jspeed"). - productScope(ocpSpecific). - enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateExternalCloudProviderGCP = newFeatureGate("ExternalCloudProviderGCP"). - reportProblemsToJiraComponent("cloud-provider"). - contactPerson("jspeed"). - productScope(ocpSpecific). - enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateExternalCloudProviderExternal = newFeatureGate("ExternalCloudProviderExternal"). - reportProblemsToJiraComponent("cloud-provider"). - contactPerson("elmiko"). - productScope(ocpSpecific). - enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateCSIDriverSharedResource = newFeatureGate("CSIDriverSharedResource"). - reportProblemsToJiraComponent("builds"). - contactPerson("adkaplan"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateBuildCSIVolumes = newFeatureGate("BuildCSIVolumes"). - reportProblemsToJiraComponent("builds"). - contactPerson("adkaplan"). - productScope(ocpSpecific). - enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateNodeSwap = newFeatureGate("NodeSwap"). - reportProblemsToJiraComponent("node"). - contactPerson("ehashman"). - productScope(kubernetes). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateMachineAPIProviderOpenStack = newFeatureGate("MachineAPIProviderOpenStack"). - reportProblemsToJiraComponent("openstack"). - contactPerson("egarcia"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateInsightsConfigAPI = newFeatureGate("InsightsConfigAPI"). - reportProblemsToJiraComponent("insights"). - contactPerson("tremes"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateDynamicResourceAllocation = newFeatureGate("DynamicResourceAllocation"). - reportProblemsToJiraComponent("scheduling"). 
- contactPerson("jchaloup"). - productScope(kubernetes). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateAzureWorkloadIdentity = newFeatureGate("AzureWorkloadIdentity"). - reportProblemsToJiraComponent("cloud-credential-operator"). - contactPerson("abutcher"). - productScope(ocpSpecific). - enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateMaxUnavailableStatefulSet = newFeatureGate("MaxUnavailableStatefulSet"). - reportProblemsToJiraComponent("apps"). - contactPerson("atiratree"). - productScope(kubernetes). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateEventedPLEG = newFeatureGate("EventedPLEG"). - reportProblemsToJiraComponent("node"). - contactPerson("sairameshv"). - productScope(kubernetes). - mustRegister() - - FeatureGatePrivateHostedZoneAWS = newFeatureGate("PrivateHostedZoneAWS"). - reportProblemsToJiraComponent("Routing"). - contactPerson("miciah"). - productScope(ocpSpecific). - enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateSigstoreImageVerification = newFeatureGate("SigstoreImageVerification"). - reportProblemsToJiraComponent("node"). - contactPerson("sgrunert"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateGCPLabelsTags = newFeatureGate("GCPLabelsTags"). - reportProblemsToJiraComponent("Installer"). - contactPerson("bhb"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateAlibabaPlatform = newFeatureGate("AlibabaPlatform"). - reportProblemsToJiraComponent("cloud-provider"). - contactPerson("jspeed"). - productScope(ocpSpecific). - enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateCloudDualStackNodeIPs = newFeatureGate("CloudDualStackNodeIPs"). - reportProblemsToJiraComponent("machine-config-operator/platform-baremetal"). - contactPerson("mkowalsk"). - productScope(kubernetes). - enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateVSphereStaticIPs = newFeatureGate("VSphereStaticIPs"). - reportProblemsToJiraComponent("splat"). - contactPerson("rvanderp3"). - productScope(ocpSpecific). - enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateRouteExternalCertificate = newFeatureGate("RouteExternalCertificate"). - reportProblemsToJiraComponent("router"). - contactPerson("thejasn"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateAdminNetworkPolicy = newFeatureGate("AdminNetworkPolicy"). - reportProblemsToJiraComponent("Networking/ovn-kubernetes"). - contactPerson("tssurya"). - productScope(ocpSpecific). - enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateNetworkLiveMigration = newFeatureGate("NetworkLiveMigration"). - reportProblemsToJiraComponent("Networking/ovn-kubernetes"). - contactPerson("pliu"). - productScope(ocpSpecific). - enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateNetworkDiagnosticsConfig = newFeatureGate("NetworkDiagnosticsConfig"). - reportProblemsToJiraComponent("Networking/cluster-network-operator"). - contactPerson("kyrtapz"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). 
- mustRegister() - - FeatureGateHardwareSpeed = newFeatureGate("HardwareSpeed"). - reportProblemsToJiraComponent("etcd"). - contactPerson("hasbro17"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateAutomatedEtcdBackup = newFeatureGate("AutomatedEtcdBackup"). - reportProblemsToJiraComponent("etcd"). - contactPerson("hasbro17"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateMachineAPIOperatorDisableMachineHealthCheckController = newFeatureGate("MachineAPIOperatorDisableMachineHealthCheckController"). - reportProblemsToJiraComponent("ecoproject"). - contactPerson("msluiter"). - productScope(ocpSpecific). - mustRegister() - - FeatureGateDNSNameResolver = newFeatureGate("DNSNameResolver"). - reportProblemsToJiraComponent("dns"). - contactPerson("miciah"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateVSphereControlPlaneMachineset = newFeatureGate("VSphereControlPlaneMachineSet"). - reportProblemsToJiraComponent("splat"). - contactPerson("rvanderp3"). - productScope(ocpSpecific). - enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateMachineConfigNodes = newFeatureGate("MachineConfigNodes"). - reportProblemsToJiraComponent("MachineConfigOperator"). - contactPerson("cdoern"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateClusterAPIInstall = newFeatureGate("ClusterAPIInstall"). - reportProblemsToJiraComponent("Installer"). - contactPerson("vincepri"). - productScope(ocpSpecific). - mustRegister() - - FeatureGateMetricsServer = newFeatureGate("MetricsServer"). - reportProblemsToJiraComponent("Monitoring"). - contactPerson("slashpai"). - productScope(ocpSpecific). - enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateInstallAlternateInfrastructureAWS = newFeatureGate("InstallAlternateInfrastructureAWS"). - reportProblemsToJiraComponent("Installer"). - contactPerson("padillon"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateGCPClusterHostedDNS = newFeatureGate("GCPClusterHostedDNS"). - reportProblemsToJiraComponent("Installer"). - contactPerson("barbacbd"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateMixedCPUsAllocation = newFeatureGate("MixedCPUsAllocation"). - reportProblemsToJiraComponent("NodeTuningOperator"). - contactPerson("titzhak"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateManagedBootImages = newFeatureGate("ManagedBootImages"). - reportProblemsToJiraComponent("MachineConfigOperator"). - contactPerson("djoshy"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateDisableKubeletCloudCredentialProviders = newFeatureGate("DisableKubeletCloudCredentialProviders"). - reportProblemsToJiraComponent("cloud-provider"). - contactPerson("jspeed"). - productScope(kubernetes). - enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateOnClusterBuild = newFeatureGate("OnClusterBuild"). - reportProblemsToJiraComponent("MachineConfigOperator"). - contactPerson("dkhater"). - productScope(ocpSpecific). 
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateSignatureStores = newFeatureGate("SignatureStores"). - reportProblemsToJiraComponent("Cluster Version Operator"). - contactPerson("lmohanty"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateKMSv1 = newFeatureGate("KMSv1"). - reportProblemsToJiraComponent("kube-apiserver"). - contactPerson("dgrisonnet"). - productScope(kubernetes). - enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGatePinnedImages = newFeatureGate("PinnedImages"). - reportProblemsToJiraComponent("MachineConfigOperator"). - contactPerson("jhernand"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateUpgradeStatus = newFeatureGate("UpgradeStatus"). - reportProblemsToJiraComponent("Cluster Version Operator"). - contactPerson("pmuller"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateTranslateStreamCloseWebsocketRequests = newFeatureGate("TranslateStreamCloseWebsocketRequests"). - reportProblemsToJiraComponent("kube-apiserver"). - contactPerson("akashem"). - productScope(kubernetes). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateVolumeGroupSnapshot = newFeatureGate("VolumeGroupSnapshot"). - reportProblemsToJiraComponent("Storage / Kubernetes External Components"). - contactPerson("fbertina"). - productScope(kubernetes). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateExternalOIDC = newFeatureGate("ExternalOIDC"). - reportProblemsToJiraComponent("authentication"). - contactPerson("stlaz"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - enableForClusterProfile(Hypershift, Default, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateExample = newFeatureGate("Example"). - reportProblemsToJiraComponent("cluster-config"). - contactPerson("deads"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGatePlatformOperators = newFeatureGate("PlatformOperators"). - reportProblemsToJiraComponent("olm"). - contactPerson("joe"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateNewOLM = newFeatureGate("NewOLM"). - reportProblemsToJiraComponent("olm"). - contactPerson("joe"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateExternalRouteCertificate = newFeatureGate("ExternalRouteCertificate"). - reportProblemsToJiraComponent("network-edge"). - contactPerson("miciah"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateInsightsOnDemandDataGather = newFeatureGate("InsightsOnDemandDataGather"). - reportProblemsToJiraComponent("insights"). - contactPerson("tremes"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateBareMetalLoadBalancer = newFeatureGate("BareMetalLoadBalancer"). - reportProblemsToJiraComponent("metal"). - contactPerson("EmilienM"). - productScope(ocpSpecific). - enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateInsightsConfig = newFeatureGate("InsightsConfig"). - reportProblemsToJiraComponent("insights"). 
- contactPerson("tremes"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateImagePolicy = newFeatureGate("ImagePolicy"). - reportProblemsToJiraComponent("node"). - contactPerson("rphillips"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateNodeDisruptionPolicy = newFeatureGate("NodeDisruptionPolicy"). - reportProblemsToJiraComponent("MachineConfigOperator"). - contactPerson("jerzhang"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateMetricsCollectionProfiles = newFeatureGate("MetricsCollectionProfiles"). - reportProblemsToJiraComponent("Monitoring"). - contactPerson("rexagod"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() - - FeatureGateVSphereDriverConfiguration = newFeatureGate("VSphereDriverConfiguration"). - reportProblemsToJiraComponent("Storage / Kubernetes External Components"). - contactPerson("rbednar"). - productScope(ocpSpecific). - enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). - mustRegister() -) diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go index bdae46689..d815556d2 100644 --- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go +++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go @@ -19,6 +19,7 @@ import ( // +kubebuilder:object:root=true // +kubebuilder:resource:path=apiservers,scope=Cluster // +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true type APIServer struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go index b3dfa61b5..f6f0c12a3 100644 --- a/vendor/github.com/openshift/api/config/v1/types_authentication.go +++ b/vendor/github.com/openshift/api/config/v1/types_authentication.go @@ -17,6 +17,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +kubebuilder:object:root=true // +kubebuilder:resource:path=authentications,scope=Cluster // +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true type Authentication struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_console.go b/vendor/github.com/openshift/api/config/v1/types_console.go index 36b1696af..e8f197b34 100644 --- a/vendor/github.com/openshift/api/config/v1/types_console.go +++ b/vendor/github.com/openshift/api/config/v1/types_console.go @@ -19,6 +19,7 @@ import ( // +kubebuilder:object:root=true // +kubebuilder:resource:path=consoles,scope=Cluster // +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true type Console struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_dns.go b/vendor/github.com/openshift/api/config/v1/types_dns.go index 1875c9cdd..5daa5d78d 100644 --- a/vendor/github.com/openshift/api/config/v1/types_dns.go +++ b/vendor/github.com/openshift/api/config/v1/types_dns.go @@ -15,6 +15,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +kubebuilder:object:root=true // +kubebuilder:resource:path=dnses,scope=Cluster // +kubebuilder:subresource:status +// 
+kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true type DNS struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go index ef2c0cc14..2769ba35a 100644 --- a/vendor/github.com/openshift/api/config/v1/types_feature.go +++ b/vendor/github.com/openshift/api/config/v1/types_feature.go @@ -17,6 +17,7 @@ import ( // +kubebuilder:object:root=true // +kubebuilder:resource:path=featuregates,scope=Cluster // +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true type FeatureGate struct { metav1.TypeMeta `json:",inline"` @@ -148,8 +149,3 @@ type FeatureGateList struct { Items []FeatureGate `json:"items"` } - -type FeatureGateEnabledDisabled struct { - Enabled []FeatureGateDescription - Disabled []FeatureGateDescription -} diff --git a/vendor/github.com/openshift/api/config/v1/types_image.go b/vendor/github.com/openshift/api/config/v1/types_image.go index 74511f864..a344086c0 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image.go +++ b/vendor/github.com/openshift/api/config/v1/types_image.go @@ -20,6 +20,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +kubebuilder:object:root=true // +kubebuilder:resource:path=images,scope=Cluster // +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true type Image struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go index f2faf1996..74df4027f 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go +++ b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go @@ -16,6 +16,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +kubebuilder:object:root=true // +kubebuilder:resource:path=imagecontentpolicies,scope=Cluster // +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true type ImageContentPolicy struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go b/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go index 8fa38f223..43d748c0c 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go +++ b/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go @@ -16,6 +16,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +kubebuilder:object:root=true // +kubebuilder:resource:path=imagedigestmirrorsets,scope=Cluster,shortName=idms // +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true type ImageDigestMirrorSet struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go b/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go index d9627b78c..ca8d35515 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go +++ b/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go @@ -16,6 +16,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +kubebuilder:object:root=true // +kubebuilder:resource:path=imagetagmirrorsets,scope=Cluster,shortName=itms // +kubebuilder:subresource:status +// 
+kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true type ImageTagMirrorSet struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go index fb224c6e6..8e50008ea 100644 --- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go +++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -18,6 +18,7 @@ import ( // +kubebuilder:object:root=true // +kubebuilder:resource:path=infrastructures,scope=Cluster // +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true type Infrastructure struct { metav1.TypeMeta `json:",inline"` @@ -825,7 +826,7 @@ type BareMetalPlatformSpec struct { // // +kubebuilder:validation:MaxItems=2 // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" - // +listType=set + // +listType=atomic // +optional APIServerInternalIPs []IP `json:"apiServerInternalIPs"` @@ -840,15 +841,16 @@ type BareMetalPlatformSpec struct { // // +kubebuilder:validation:MaxItems=2 // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" - // +listType=set + // +listType=atomic // +optional IngressIPs []IP `json:"ingressIPs"` // machineNetworks are IP networks used to connect all the OpenShift cluster // nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, // for example "10.0.0.0/8" or "fd00::/8". - // +listType=set + // +listType=atomic // +kubebuilder:validation:MaxItems=32 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" // +optional MachineNetworks []CIDR `json:"machineNetworks"` } @@ -873,7 +875,8 @@ type BareMetalPlatformStatus struct { // // +kubebuilder:validation:Format=ip // +kubebuilder:validation:MaxItems=2 - // +listType=set + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic APIServerInternalIPs []string `json:"apiServerInternalIPs"` // ingressIP is an external IP which routes to the default ingress controller. @@ -889,7 +892,8 @@ type BareMetalPlatformStatus struct { // // +kubebuilder:validation:Format=ip // +kubebuilder:validation:MaxItems=2 - // +listType=set + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic IngressIPs []string `json:"ingressIPs"` // nodeDNSIP is the IP address for the internal DNS used by the @@ -908,8 +912,9 @@ type BareMetalPlatformStatus struct { LoadBalancer *BareMetalPlatformLoadBalancer `json:"loadBalancer,omitempty"` // machineNetworks are IP networks used to connect all the OpenShift cluster nodes. 
- // +listType=set + // +listType=atomic // +kubebuilder:validation:MaxItems=32 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" // +optional MachineNetworks []CIDR `json:"machineNetworks"` } @@ -952,7 +957,7 @@ type OpenStackPlatformSpec struct { // // +kubebuilder:validation:MaxItems=2 // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" - // +listType=set + // +listType=atomic // +optional APIServerInternalIPs []IP `json:"apiServerInternalIPs"` @@ -967,15 +972,16 @@ type OpenStackPlatformSpec struct { // // +kubebuilder:validation:MaxItems=2 // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" - // +listType=set + // +listType=atomic // +optional IngressIPs []IP `json:"ingressIPs"` // machineNetworks are IP networks used to connect all the OpenShift cluster // nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, // for example "10.0.0.0/8" or "fd00::/8". - // +listType=set + // +listType=atomic // +kubebuilder:validation:MaxItems=32 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" // +optional MachineNetworks []CIDR `json:"machineNetworks"` } @@ -998,7 +1004,8 @@ type OpenStackPlatformStatus struct { // // +kubebuilder:validation:Format=ip // +kubebuilder:validation:MaxItems=2 - // +listType=set + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic APIServerInternalIPs []string `json:"apiServerInternalIPs"` // cloudName is the name of the desired OpenStack cloud in the @@ -1018,7 +1025,8 @@ type OpenStackPlatformStatus struct { // // +kubebuilder:validation:Format=ip // +kubebuilder:validation:MaxItems=2 - // +listType=set + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic IngressIPs []string `json:"ingressIPs"` // nodeDNSIP is the IP address for the internal DNS used by the @@ -1036,8 +1044,9 @@ type OpenStackPlatformStatus struct { LoadBalancer *OpenStackPlatformLoadBalancer `json:"loadBalancer,omitempty"` // machineNetworks are IP networks used to connect all the OpenShift cluster nodes. - // +listType=set + // +listType=atomic // +kubebuilder:validation:MaxItems=32 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" // +optional MachineNetworks []CIDR `json:"machineNetworks"` } @@ -1085,6 +1094,7 @@ type OvirtPlatformStatus struct { // // +kubebuilder:validation:Format=ip // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? 
ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" // +listType=set APIServerInternalIPs []string `json:"apiServerInternalIPs"` @@ -1101,6 +1111,7 @@ type OvirtPlatformStatus struct { // // +kubebuilder:validation:Format=ip // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" // +listType=set IngressIPs []string `json:"ingressIPs"` @@ -1333,8 +1344,9 @@ type VSpherePlatformSpec struct { // --- // + If VCenters is not defined use the existing cloud-config configmap defined // + in openshift-config. - // +kubebuilder:validation:MaxItems=1 // +kubebuilder:validation:MinItems=0 + // +openshift:validation:FeatureGateAwareMaxItems:featureGate="",maxItems=1 + // +openshift:validation:FeatureGateAwareMaxItems:featureGate=VSphereMultiVCenters,maxItems=3 // +listType=atomic // +optional VCenters []VSpherePlatformVCenterSpec `json:"vcenters,omitempty"` @@ -1366,7 +1378,7 @@ type VSpherePlatformSpec struct { // // +kubebuilder:validation:MaxItems=2 // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" - // +listType=set + // +listType=atomic // +optional APIServerInternalIPs []IP `json:"apiServerInternalIPs"` @@ -1381,15 +1393,16 @@ type VSpherePlatformSpec struct { // // +kubebuilder:validation:MaxItems=2 // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" - // +listType=set + // +listType=atomic // +optional IngressIPs []IP `json:"ingressIPs"` // machineNetworks are IP networks used to connect all the OpenShift cluster // nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, // for example "10.0.0.0/8" or "fd00::/8". - // +listType=set + // +listType=atomic // +kubebuilder:validation:MaxItems=32 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" // +optional MachineNetworks []CIDR `json:"machineNetworks"` } @@ -1412,7 +1425,8 @@ type VSpherePlatformStatus struct { // // +kubebuilder:validation:Format=ip // +kubebuilder:validation:MaxItems=2 - // +listType=set + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic APIServerInternalIPs []string `json:"apiServerInternalIPs"` // ingressIP is an external IP which routes to the default ingress controller. @@ -1428,7 +1442,8 @@ type VSpherePlatformStatus struct { // // +kubebuilder:validation:Format=ip // +kubebuilder:validation:MaxItems=2 - // +listType=set + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? 
ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic IngressIPs []string `json:"ingressIPs"` // nodeDNSIP is the IP address for the internal DNS used by the @@ -1447,8 +1462,9 @@ type VSpherePlatformStatus struct { LoadBalancer *VSpherePlatformLoadBalancer `json:"loadBalancer,omitempty"` // machineNetworks are IP networks used to connect all the OpenShift cluster nodes. - // +listType=set + // +listType=atomic // +kubebuilder:validation:MaxItems=32 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" // +optional MachineNetworks []CIDR `json:"machineNetworks"` } @@ -1813,6 +1829,7 @@ type NutanixPlatformStatus struct { // // +kubebuilder:validation:Format=ip // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" // +listType=set APIServerInternalIPs []string `json:"apiServerInternalIPs"` @@ -1829,6 +1846,7 @@ type NutanixPlatformStatus struct { // // +kubebuilder:validation:Format=ip // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" // +listType=set IngressIPs []string `json:"ingressIPs"` diff --git a/vendor/github.com/openshift/api/config/v1/types_ingress.go b/vendor/github.com/openshift/api/config/v1/types_ingress.go index e58ad7f00..302913a16 100644 --- a/vendor/github.com/openshift/api/config/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/config/v1/types_ingress.go @@ -18,6 +18,7 @@ import ( // +kubebuilder:object:root=true // +kubebuilder:resource:path=ingresses,scope=Cluster // +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true type Ingress struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go index 618aeff3b..211d5c088 100644 --- a/vendor/github.com/openshift/api/config/v1/types_network.go +++ b/vendor/github.com/openshift/api/config/v1/types_network.go @@ -18,6 +18,7 @@ import ( // +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 // +kubebuilder:object:root=true // +kubebuilder:resource:path=networks,scope=Cluster +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true type Network struct { metav1.TypeMeta `json:",inline"` @@ -45,11 +46,13 @@ type Network struct { type NetworkSpec struct { // IP address pool to use for pod IPs. // This field is immutable after installation. + // +listType=atomic ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"` // IP address pool for services. // Currently, we only support a single entry here. // This field is immutable after installation. + // +listType=atomic ServiceNetwork []string `json:"serviceNetwork"` // NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). @@ -90,10 +93,12 @@ type NetworkSpec struct { // NetworkStatus is the current network configuration. 
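
A note on the pattern repeated in the hunks above: spec-side lists move from +listType=set to +listType=atomic, which makes server-side apply own and replace each list as a single unit, so the duplicate-free guarantee that set semantics used to provide is re-imposed with an explicit CEL rule; the status-side dual-stack checks are wrapped in "self == oldSelf || (...)" so that values persisted before the rule existed still validate on unrelated updates, while the Ovirt and Nutanix status lists keep +listType=set and gain only that ratcheted check. A minimal sketch of the combined markers on a hypothetical field, mirroring the patch:

	type ExamplePlatformSpec struct {
		// The CEL rule reads: every element x appears exactly once, i.e. no duplicates.
		// +listType=atomic
		// +kubebuilder:validation:MaxItems=32
		// +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))"
		// +optional
		ExampleNetworks []CIDR `json:"exampleNetworks,omitempty"`
	}
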
type NetworkStatus struct { // IP address pool to use for pod IPs. + // +listType=atomic ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork,omitempty"` // IP address pool for services. // Currently, we only support a single entry here. + // +listType=atomic ServiceNetwork []string `json:"serviceNetwork,omitempty"` // NetworkType is the plugin that is deployed (e.g. OpenShiftSDN). @@ -148,6 +153,7 @@ type ExternalIPConfig struct { // ExternalIPPolicy rules. // Currently, only one entry may be provided. // +optional + // +listType=atomic AutoAssignCIDRs []string `json:"autoAssignCIDRs,omitempty"` } @@ -156,11 +162,13 @@ type ExternalIPConfig struct { // The policy controller always allows automatically assigned external IPs. type ExternalIPPolicy struct { // allowedCIDRs is the list of allowed CIDRs. + // +listType=atomic AllowedCIDRs []string `json:"allowedCIDRs,omitempty"` // rejectedCIDRs is the list of disallowed CIDRs. These take precedence // over allowedCIDRs. // +optional + // +listType=atomic RejectedCIDRs []string `json:"rejectedCIDRs,omitempty"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_node.go b/vendor/github.com/openshift/api/config/v1/types_node.go index 3dd31f39a..8bf099bd5 100644 --- a/vendor/github.com/openshift/api/config/v1/types_node.go +++ b/vendor/github.com/openshift/api/config/v1/types_node.go @@ -19,6 +19,7 @@ import ( // +kubebuilder:object:root=true // +kubebuilder:resource:path=nodes,scope=Cluster // +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true type Node struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_oauth.go b/vendor/github.com/openshift/api/config/v1/types_oauth.go index 6654479dc..dce08a17f 100644 --- a/vendor/github.com/openshift/api/config/v1/types_oauth.go +++ b/vendor/github.com/openshift/api/config/v1/types_oauth.go @@ -19,6 +19,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +kubebuilder:object:root=true // +kubebuilder:resource:path=oauths,scope=Cluster // +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true type OAuth struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_project.go b/vendor/github.com/openshift/api/config/v1/types_project.go index 8d6d614b6..78fd3f41a 100644 --- a/vendor/github.com/openshift/api/config/v1/types_project.go +++ b/vendor/github.com/openshift/api/config/v1/types_project.go @@ -15,6 +15,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +kubebuilder:object:root=true // +kubebuilder:resource:path=projects,scope=Cluster // +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true type Project struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_proxy.go b/vendor/github.com/openshift/api/config/v1/types_proxy.go index 851291bb0..2dfc66b1c 100644 --- a/vendor/github.com/openshift/api/config/v1/types_proxy.go +++ b/vendor/github.com/openshift/api/config/v1/types_proxy.go @@ -17,6 +17,7 @@ import ( // +kubebuilder:object:root=true // +kubebuilder:resource:path=proxies,scope=Cluster // +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true type Proxy struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_scheduling.go 
b/vendor/github.com/openshift/api/config/v1/types_scheduling.go index 061c4a883..2749f4f70 100644 --- a/vendor/github.com/openshift/api/config/v1/types_scheduling.go +++ b/vendor/github.com/openshift/api/config/v1/types_scheduling.go @@ -16,6 +16,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +kubebuilder:object:root=true // +kubebuilder:resource:path=schedulers,scope=Cluster // +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true type Scheduler struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go index c80e66c43..9a81bc559 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -2039,23 +2039,6 @@ func (in *FeatureGateAttributes) DeepCopy() *FeatureGateAttributes { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FeatureGateDescription) DeepCopyInto(out *FeatureGateDescription) { - *out = *in - out.FeatureGateAttributes = in.FeatureGateAttributes - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateDescription. -func (in *FeatureGateDescription) DeepCopy() *FeatureGateDescription { - if in == nil { - return nil - } - out := new(FeatureGateDescription) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FeatureGateDetails) DeepCopyInto(out *FeatureGateDetails) { *out = *in @@ -2082,32 +2065,6 @@ func (in *FeatureGateDetails) DeepCopy() *FeatureGateDetails { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FeatureGateEnabledDisabled) DeepCopyInto(out *FeatureGateEnabledDisabled) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = make([]FeatureGateDescription, len(*in)) - copy(*out, *in) - } - if in.Disabled != nil { - in, out := &in.Disabled, &out.Disabled - *out = make([]FeatureGateDescription, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateEnabledDisabled. -func (in *FeatureGateEnabledDisabled) DeepCopy() *FeatureGateEnabledDisabled { - if in == nil { - return nil - } - out := new(FeatureGateEnabledDisabled) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
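
The +kubebuilder:metadata:annotations marker being added to each of these config/v1 types is copied by controller-gen into the generated CRD's metadata, as the manifest changes further down confirm; judging by the annotation name, it flags CRDs that must be rendered during cluster bootstrap. A hedged sketch on a hypothetical type (Example is not part of this patch):

	package v1

	import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	// +kubebuilder:object:root=true
	// +kubebuilder:resource:path=examples,scope=Cluster
	// +kubebuilder:subresource:status
	// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true
	type Example struct {
		metav1.TypeMeta   `json:",inline"`
		metav1.ObjectMeta `json:"metadata,omitempty"`
	}
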
func (in *FeatureGateList) DeepCopyInto(out *FeatureGateList) { *out = *in diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml index 286bbbd84..ddc7594f7 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml @@ -1,5 +1,6 @@ apiservers.config.openshift.io: - Annotations: {} + Annotations: + release.openshift.io/bootstrap-required: "true" ApprovedPRNumber: https://github.com/openshift/api/pull/470 CRDName: apiservers.config.openshift.io Capability: "" @@ -20,7 +21,8 @@ apiservers.config.openshift.io: Version: v1 authentications.config.openshift.io: - Annotations: {} + Annotations: + release.openshift.io/bootstrap-required: "true" ApprovedPRNumber: https://github.com/openshift/api/pull/470 CRDName: authentications.config.openshift.io Capability: "" @@ -144,7 +146,8 @@ clusterversions.config.openshift.io: Version: v1 consoles.config.openshift.io: - Annotations: {} + Annotations: + release.openshift.io/bootstrap-required: "true" ApprovedPRNumber: https://github.com/openshift/api/pull/470 CRDName: consoles.config.openshift.io Capability: "" @@ -165,7 +168,8 @@ consoles.config.openshift.io: Version: v1 dnses.config.openshift.io: - Annotations: {} + Annotations: + release.openshift.io/bootstrap-required: "true" ApprovedPRNumber: https://github.com/openshift/api/pull/470 CRDName: dnses.config.openshift.io Capability: "" @@ -186,7 +190,8 @@ dnses.config.openshift.io: Version: v1 featuregates.config.openshift.io: - Annotations: {} + Annotations: + release.openshift.io/bootstrap-required: "true" ApprovedPRNumber: https://github.com/openshift/api/pull/470 CRDName: featuregates.config.openshift.io Capability: "" @@ -207,7 +212,8 @@ featuregates.config.openshift.io: Version: v1 images.config.openshift.io: - Annotations: {} + Annotations: + release.openshift.io/bootstrap-required: "true" ApprovedPRNumber: https://github.com/openshift/api/pull/470 CRDName: images.config.openshift.io Capability: "" @@ -228,7 +234,8 @@ images.config.openshift.io: Version: v1 imagecontentpolicies.config.openshift.io: - Annotations: {} + Annotations: + release.openshift.io/bootstrap-required: "true" ApprovedPRNumber: https://github.com/openshift/api/pull/874 CRDName: imagecontentpolicies.config.openshift.io Capability: "" @@ -249,7 +256,8 @@ imagecontentpolicies.config.openshift.io: Version: v1 imagedigestmirrorsets.config.openshift.io: - Annotations: {} + Annotations: + release.openshift.io/bootstrap-required: "true" ApprovedPRNumber: https://github.com/openshift/api/pull/1126 CRDName: imagedigestmirrorsets.config.openshift.io Capability: "" @@ -271,7 +279,8 @@ imagedigestmirrorsets.config.openshift.io: Version: v1 imagetagmirrorsets.config.openshift.io: - Annotations: {} + Annotations: + release.openshift.io/bootstrap-required: "true" ApprovedPRNumber: https://github.com/openshift/api/pull/1126 CRDName: imagetagmirrorsets.config.openshift.io Capability: "" @@ -293,7 +302,8 @@ imagetagmirrorsets.config.openshift.io: Version: v1 infrastructures.config.openshift.io: - Annotations: {} + Annotations: + release.openshift.io/bootstrap-required: "true" ApprovedPRNumber: https://github.com/openshift/api/pull/470 CRDName: infrastructures.config.openshift.io Capability: "" @@ -303,6 +313,7 @@ infrastructures.config.openshift.io: - GCPClusterHostedDNS - 
GCPLabelsTags - VSphereControlPlaneMachineSet + - VSphereMultiVCenters FilenameOperatorName: config-operator FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_10" @@ -318,7 +329,8 @@ infrastructures.config.openshift.io: Version: v1 ingresses.config.openshift.io: - Annotations: {} + Annotations: + release.openshift.io/bootstrap-required: "true" ApprovedPRNumber: https://github.com/openshift/api/pull/470 CRDName: ingresses.config.openshift.io Capability: "" @@ -339,7 +351,8 @@ ingresses.config.openshift.io: Version: v1 networks.config.openshift.io: - Annotations: {} + Annotations: + release.openshift.io/bootstrap-required: "true" ApprovedPRNumber: https://github.com/openshift/api/pull/470 CRDName: networks.config.openshift.io Capability: "" @@ -362,7 +375,8 @@ networks.config.openshift.io: Version: v1 nodes.config.openshift.io: - Annotations: {} + Annotations: + release.openshift.io/bootstrap-required: "true" ApprovedPRNumber: https://github.com/openshift/api/pull/1107 CRDName: nodes.config.openshift.io Capability: "" @@ -383,7 +397,8 @@ nodes.config.openshift.io: Version: v1 oauths.config.openshift.io: - Annotations: {} + Annotations: + release.openshift.io/bootstrap-required: "true" ApprovedPRNumber: https://github.com/openshift/api/pull/470 CRDName: oauths.config.openshift.io Capability: "" @@ -425,7 +440,8 @@ operatorhubs.config.openshift.io: Version: v1 projects.config.openshift.io: - Annotations: {} + Annotations: + release.openshift.io/bootstrap-required: "true" ApprovedPRNumber: https://github.com/openshift/api/pull/470 CRDName: projects.config.openshift.io Capability: "" @@ -446,7 +462,8 @@ projects.config.openshift.io: Version: v1 proxies.config.openshift.io: - Annotations: {} + Annotations: + release.openshift.io/bootstrap-required: "true" ApprovedPRNumber: https://github.com/openshift/api/pull/470 CRDName: proxies.config.openshift.io Capability: "" @@ -467,7 +484,8 @@ proxies.config.openshift.io: Version: v1 schedulers.config.openshift.io: - Annotations: {} + Annotations: + release.openshift.io/bootstrap-required: "true" ApprovedPRNumber: https://github.com/openshift/api/pull/470 CRDName: schedulers.config.openshift.io Capability: "" diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.featuregated-crd-manifests.yaml index a10144da5..00e41bca5 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.featuregated-crd-manifests.yaml @@ -32,6 +32,7 @@ controllerconfigs.machineconfiguration.openshift.io: - GCPClusterHostedDNS - GCPLabelsTags - VSphereControlPlaneMachineSet + - VSphereMultiVCenters FilenameOperatorName: machine-config FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_80" diff --git a/vendor/github.com/openshift/api/operator/v1/types_console.go b/vendor/github.com/openshift/api/operator/v1/types_console.go index 474253d5d..66e3798d9 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_console.go +++ b/vendor/github.com/openshift/api/operator/v1/types_console.go @@ -57,6 +57,11 @@ type ConsoleSpec struct { // plugins defines a list of enabled console plugin names. // +optional Plugins []string `json:"plugins,omitempty"` + // ingress allows to configure the alternative ingress for the console. 
+ // This field is intended for clusters without ingress capability,
+ // where access to routes is not possible.
+ // +optional
+ Ingress Ingress `json:"ingress"`
 }

 // ConsoleConfigRoute holds information on external route access to console.
@@ -375,6 +380,35 @@ const (
 BrandROSA Brand = "ROSA"
 )

+// Ingress allows cluster admin to configure alternative ingress for the console.
+type Ingress struct {
+ // consoleURL is a URL to be used as the base console address.
+ // If not specified, the console route hostname will be used.
+ // This field is required for clusters without ingress capability,
+ // where access to routes is not possible.
+ // Make sure that appropriate ingress is set up at this URL.
+ // The console operator will monitor the URL and may go degraded
+ // if it's unreachable for an extended period.
+ // Must use the HTTPS scheme.
+ // +optional
+ // +kubebuilder:validation:XValidation:rule="size(self) == 0 || isURL(self)",message="console url must be a valid absolute URL"
+ // +kubebuilder:validation:XValidation:rule="size(self) == 0 || url(self).getScheme() == 'https'",message="console url scheme must be https"
+ // +kubebuilder:validation:MaxLength=1024
+ ConsoleURL string `json:"consoleURL"`
+ // clientDownloadsURL is a URL to be used as the address to download client binaries.
+ // If not specified, the downloads route hostname will be used.
+ // This field is required for clusters without ingress capability,
+ // where access to routes is not possible.
+ // The console operator will monitor the URL and may go degraded
+ // if it's unreachable for an extended period.
+ // Must use the HTTPS scheme.
+ // +optional
+ // +kubebuilder:validation:XValidation:rule="size(self) == 0 || isURL(self)",message="client downloads url must be a valid absolute URL"
+ // +kubebuilder:validation:XValidation:rule="size(self) == 0 || url(self).getScheme() == 'https'",message="client downloads url scheme must be https"
+ // +kubebuilder:validation:MaxLength=1024
+ ClientDownloadsURL string `json:"clientDownloadsURL"`
+}
+
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

 // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
diff --git a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go
index 349c8d461..4fccecb9f 100644
--- a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go
+++ b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go
@@ -279,6 +279,7 @@ type VSphereCSIDriverConfigSpec struct {
 // If cluster Infrastructure object has a topology, values specified in
 // Infrastructure object will be used and modifications to topologyCategories
 // will be rejected.
+ // +listType=atomic
 // +optional
 TopologyCategories []string `json:"topologyCategories,omitempty"`
diff --git a/vendor/github.com/openshift/api/operator/v1/types_etcd.go b/vendor/github.com/openshift/api/operator/v1/types_etcd.go
index a2ba12689..71345d7d7 100644
--- a/vendor/github.com/openshift/api/operator/v1/types_etcd.go
+++ b/vendor/github.com/openshift/api/operator/v1/types_etcd.go
@@ -43,6 +43,18 @@ type EtcdSpec struct {
 // +openshift:enable:FeatureGate=HardwareSpeed
 // +optional
 HardwareSpeed ControlPlaneHardwareSpeed `json:"controlPlaneHardwareSpeed"`
+
+ // backendQuotaGiB sets the etcd backend storage size limit in gibibytes.
+ // The value should be an integer not less than 8 and not more than 32.
+ // When not specified, the default value is 8.
+ // +kubebuilder:default:=8
+ // +kubebuilder:validation:Minimum=8
+ // +kubebuilder:validation:Maximum=32
+ // +kubebuilder:validation:XValidation:rule="self>=oldSelf",message="etcd backendQuotaGiB may not be decreased"
+ // +openshift:enable:FeatureGate=EtcdBackendQuota
+ // +default=8
+ // +optional
+ BackendQuotaGiB int32 `json:"backendQuotaGiB,omitempty"`
 }

 type EtcdStatus struct {
diff --git a/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go b/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go
index 5afc154dc..8bd41eb69 100644
--- a/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go
+++ b/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go
@@ -57,8 +57,39 @@ type MachineConfigurationSpec struct {
 }

 type MachineConfigurationStatus struct {
- // TODO tombstone this field
- StaticPodOperatorStatus `json:",inline"`
+ // observedGeneration is the last generation change you've dealt with
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+ // conditions is a list of conditions and their status
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=type
+ Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+
+ // Previously there was a StaticPodOperatorStatus here for legacy reasons. Many of the fields within
+ // it are no longer relevant for the MachineConfiguration CRD's functions. The following remaining
+ // fields were tombstoned after lifting out StaticPodOperatorStatus. To avoid conflicts with
+ // serialisation, the following field names may never be used again.
+
+ // Tombstone: legacy field from StaticPodOperatorStatus
+ // Version string `json:"version,omitempty"`
+
+ // Tombstone: legacy field from StaticPodOperatorStatus
+ // ReadyReplicas int32 `json:"readyReplicas"`
+
+ // Tombstone: legacy field from StaticPodOperatorStatus
+ // Generations []GenerationStatus `json:"generations,omitempty"`
+
+ // Tombstone: legacy field from StaticPodOperatorStatus
+ // LatestAvailableRevision int32 `json:"latestAvailableRevision,omitempty"`
+
+ // Tombstone: legacy field from StaticPodOperatorStatus
+ // LatestAvailableRevisionReason string `json:"latestAvailableRevisionReason,omitempty"`
+
+ // Tombstone: legacy field from StaticPodOperatorStatus
+ // NodeStatuses []NodeStatus `json:"nodeStatuses,omitempty"`

 // nodeDisruptionPolicyStatus status reflects what the latest cluster-validated policies are,
 // and will be used by the Machine Config Daemon during future node updates.
@@ -351,7 +382,7 @@ type NodeDisruptionPolicyStatusSSHKey struct {
 // +union
 type NodeDisruptionPolicySpecAction struct {
 // type represents the commands that will be carried out if this NodeDisruptionPolicySpecActionType is executed
- // Valid value are Reboot, Drain, Reload, Restart, DaemonReload, None and Special
+ // Valid values are Reboot, Drain, Reload, Restart, DaemonReload and None.
 // reload/restart requires a corresponding service target specified in the reload/restart field.
// Other values require no further configuration // +unionDiscriminator @@ -370,7 +401,7 @@ type NodeDisruptionPolicySpecAction struct { // +union type NodeDisruptionPolicyStatusAction struct { // type represents the commands that will be carried out if this NodeDisruptionPolicyStatusActionType is executed - // Valid value are Reboot, Drain, Reload, Restart, DaemonReload, None and Special + // Valid values are Reboot, Drain, Reload, Restart, DaemonReload, None and Special. // reload/restart requires a corresponding service target specified in the reload/restart field. // Other values require no further configuration // +unionDiscriminator @@ -463,3 +494,16 @@ const ( // Special represents an action that is internal to the MCO, and is not allowed in user defined NodeDisruption policies. SpecialStatusAction NodeDisruptionPolicyStatusActionType = "Special" ) + +// These strings will be used for MachineConfiguration Status conditions. +const ( + // MachineConfigurationBootImageUpdateDegraded means that the MCO ran into an error while reconciling boot images. This + // will cause the clusteroperators.config.openshift.io/machine-config to degrade. This condition will indicate the cause + // of the degrade, the progress of the update and the generation of the boot images configmap that it degraded on. + MachineConfigurationBootImageUpdateDegraded string = "BootImageUpdateDegraded" + + // MachineConfigurationBootImageUpdateProgressing means that the MCO is in the process of reconciling boot images. This + // will cause the clusteroperators.config.openshift.io/machine-config to be in a Progressing state. This condition will + // indicate the progress of the update and the generation of the boot images configmap that triggered this update. + MachineConfigurationBootImageUpdateProgressing string = "BootImageUpdateProgressing" +) diff --git a/vendor/github.com/openshift/api/operator/v1/types_network.go b/vendor/github.com/openshift/api/operator/v1/types_network.go index 970be707e..35bb5ada3 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_network.go +++ b/vendor/github.com/openshift/api/operator/v1/types_network.go @@ -11,7 +11,6 @@ import ( // +kubebuilder:resource:path=networks,scope=Cluster // +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475 // +openshift:file-pattern=cvoRunLevel=0000_70,operatorName=network,operatorOrdering=01 -// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true // Network describes the cluster's desired network configuration. It is // consumed by the cluster-network-operator. @@ -135,7 +134,7 @@ const ( ) // NetworkMigration represents the cluster network configuration. -// +openshift:validation:FeatureGateAwareXValidation:featureGate=NetworkLiveMigration,rule="!has(self.mtu) || !has(self.networkType) || self.networkType == '' || has(self.mode) && self.mode == 'Live'",message="networkType migration in mode other than 'Live' may not be configured at the same time as mtu migration" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=NetworkLiveMigration,rule="!has(self.mtu) || !has(self.networkType) || self.networkType == \"\" || has(self.mode) && self.mode == 'Live'",message="networkType migration in mode other than 'Live' may not be configured at the same time as mtu migration" type NetworkMigration struct { // networkType is the target type of network migration. Set this to the // target network type to allow changing the default network. 
If unset, the @@ -450,8 +449,8 @@ type IPv4OVNKubernetesConfig struct { // The value must be in proper IPV4 CIDR format // +kubebuilder:validation:MaxLength=18 // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 4",message="Subnet must be in valid IPV4 CIDR format" - // +kubebuilder:validation:XValidation:rule="[self.findAll('[0-9]+')[0]].all(x, x != '0' && int(x) <= 255 && !x.startsWith('0'))",message="first IP address octet must not contain leading zeros, must be greater than 0 and less or equal to 255" - // +kubebuilder:validation:XValidation:rule="[int(self.split('/')[1])].all(x, x <= 30 && x >= 0)",message="subnet must be in the range /0 to /30 inclusive" + // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 30",message="subnet must be in the range /0 to /30 inclusive" + // +kubebuilder:validation:XValidation:rule="isCIDR(self) && int(self.split('.')[0]) > 0",message="first IP address octet must not be 0" // +optional InternalTransitSwitchSubnet string `json:"internalTransitSwitchSubnet,omitempty"` // internalJoinSubnet is a v4 subnet used internally by ovn-kubernetes in case the @@ -464,8 +463,8 @@ type IPv4OVNKubernetesConfig struct { // The value must be in proper IPV4 CIDR format // +kubebuilder:validation:MaxLength=18 // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 4",message="Subnet must be in valid IPV4 CIDR format" - // +kubebuilder:validation:XValidation:rule="[self.findAll('[0-9]+')[0]].all(x, x != '0' && int(x) <= 255 && !x.startsWith('0'))",message="first IP address octet must not contain leading zeros, must be greater than 0 and less or equal to 255" - // +kubebuilder:validation:XValidation:rule="[int(self.split('/')[1])].all(x, x <= 30 && x >= 0)",message="subnet must be in the range /0 to /30 inclusive" + // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 30",message="subnet must be in the range /0 to /30 inclusive" + // +kubebuilder:validation:XValidation:rule="isCIDR(self) && int(self.split('.')[0]) > 0",message="first IP address octet must not be 0" // +optional InternalJoinSubnet string `json:"internalJoinSubnet,omitempty"` } @@ -484,10 +483,8 @@ type IPv6OVNKubernetesConfig struct { // The value must be in proper IPV6 CIDR format // Note that IPV6 dual addresses are not permitted // +kubebuilder:validation:MaxLength=48 - // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 6",message="Subnet must be in valid IPV6 CIDR format" - // +kubebuilder:validation:XValidation:rule="self.split('/').size() == 2 && [int(self.split('/')[1])].all(x, x <= 125 && x >= 0)",message="subnet must be in the range /0 to /125 inclusive" - // +kubebuilder:validation:XValidation:rule="self.contains('::') ? 
self.split('/')[0].split(':').size() <= 8 : self.split('/')[0].split(':').size() == 8",message="a valid IPv6 address must contain 8 segments unless elided (::), in which case it must contain at most 6 non-empty segments" - // +kubebuilder:validation:XValidation:rule="!self.contains('.')",message="IPv6 dual addresses are not permitted, value should not contain `.` characters" + // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 6",message="Subnet must be in valid IPV6 CIDR format" + // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 125",message="subnet must be in the range /0 to /125 inclusive" // +optional InternalTransitSwitchSubnet string `json:"internalTransitSwitchSubnet,omitempty"` // internalJoinSubnet is a v6 subnet used internally by ovn-kubernetes in case the @@ -501,9 +498,7 @@ type IPv6OVNKubernetesConfig struct { // Note that IPV6 dual addresses are not permitted // +kubebuilder:validation:MaxLength=48 // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 6",message="Subnet must be in valid IPV6 CIDR format" - // +kubebuilder:validation:XValidation:rule="self.split('/').size() == 2 && [int(self.split('/')[1])].all(x, x <= 125 && x >= 0)",message="subnet must be in the range /0 to /125 inclusive" - // +kubebuilder:validation:XValidation:rule="self.contains('::') ? self.split('/')[0].split(':').size() <= 8 : self.split('/')[0].split(':').size() == 8",message="a valid IPv6 address must contain 8 segments unless elided (::), in which case it must contain at most 6 non-empty segments" - // +kubebuilder:validation:XValidation:rule="!self.contains('.')",message="IPv6 dual addresses are not permitted, value should not contain `.` characters" + // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 125",message="subnet must be in the range /0 to /125 inclusive" // +optional InternalJoinSubnet string `json:"internalJoinSubnet,omitempty"` } @@ -581,11 +576,9 @@ type IPv4GatewayConfig struct { // The current default subnet is 169.254.169.0/29 // The value must be in proper IPV4 CIDR format // +kubebuilder:validation:MaxLength=18 - // +kubebuilder:validation:XValidation:rule="self.indexOf('/') == self.lastIndexOf('/')",message="CIDR format must contain exactly one '/'" - // +kubebuilder:validation:XValidation:rule="[int(self.split('/')[1])].all(x, x <= 29 && x >= 0)",message="subnet must be in the range /0 to /29 inclusive" - // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split('.').size() == 4",message="a valid IPv4 address must contain 4 octets" - // +kubebuilder:validation:XValidation:rule="[self.findAll('[0-9]+')[0]].all(x, x != '0' && int(x) <= 255 && !x.startsWith('0'))",message="first IP address octet must not contain leading zeros, must be greater than 0 and less or equal to 255" - // +kubebuilder:validation:XValidation:rule="[self.findAll('[0-9]+')[1], self.findAll('[0-9]+')[2], self.findAll('[0-9]+')[3]].all(x, int(x) <= 255 && (x == '0' || !x.startsWith('0')))",message="IP address octets must not contain leading zeros, and must be less or equal to 255" + // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 4",message="Subnet must be in valid IPV4 CIDR format" + // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 29",message="subnet must be in the range /0 to /29 inclusive" + // +kubebuilder:validation:XValidation:rule="isCIDR(self) && 
int(self.split('.')[0]) > 0",message="first IP address octet must not be 0" // +optional InternalMasqueradeSubnet string `json:"internalMasqueradeSubnet,omitempty"` } @@ -601,19 +594,8 @@ type IPv6GatewayConfig struct { // When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. // The current default subnet is fd69::/125 // Note that IPV6 dual addresses are not permitted - // +kubebuilder:validation:XValidation:rule="self.indexOf('/') == self.lastIndexOf('/')",message="CIDR format must contain exactly one '/'" - // +kubebuilder:validation:XValidation:rule="self.split('/').size() == 2 && [int(self.split('/')[1])].all(x, x <= 125 && x >= 0)",message="subnet must be in the range /0 to /125 inclusive" - // +kubebuilder:validation:XValidation:rule="self.indexOf('::') == self.lastIndexOf('::')",message="IPv6 addresses must contain at most one '::' and may only be shortened once" - // +kubebuilder:validation:XValidation:rule="self.contains('::') ? self.split('/')[0].split(':').size() <= 8 : self.split('/')[0].split(':').size() == 8",message="a valid IPv6 address must contain 8 segments unless elided (::), in which case it must contain at most 6 non-empty segments" - // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=1 ? [self.split('/')[0].split(':', 8)[0]].all(x, x == '' || (x.matches('^[0-9A-Fa-f]{1,4}$')) && size(x)<5 ) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 1" - // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=2 ? [self.split('/')[0].split(':', 8)[1]].all(x, x == '' || (x.matches('^[0-9A-Fa-f]{1,4}$')) && size(x)<5 ) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 2" - // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=3 ? [self.split('/')[0].split(':', 8)[2]].all(x, x == '' || (x.matches('^[0-9A-Fa-f]{1,4}$')) && size(x)<5 ) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 3" - // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=4 ? [self.split('/')[0].split(':', 8)[3]].all(x, x == '' || (x.matches('^[0-9A-Fa-f]{1,4}$')) && size(x)<5 ) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 4" - // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=5 ? [self.split('/')[0].split(':', 8)[4]].all(x, x == '' || (x.matches('^[0-9A-Fa-f]{1,4}$')) && size(x)<5 ) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 5" - // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=6 ? [self.split('/')[0].split(':', 8)[5]].all(x, x == '' || (x.matches('^[0-9A-Fa-f]{1,4}$')) && size(x)<5 ) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 6" - // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=7 ? [self.split('/')[0].split(':', 8)[6]].all(x, x == '' || (x.matches('^[0-9A-Fa-f]{1,4}$')) && size(x)<5 ) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 7" - // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=8 ? 
[self.split('/')[0].split(':', 8)[7]].all(x, x == '' || (x.matches('^[0-9A-Fa-f]{1,4}$')) && size(x)<5 ) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 8" - // +kubebuilder:validation:XValidation:rule="!self.contains('.')",message="IPv6 dual addresses are not permitted, value should not contain `.` characters" + // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 6",message="Subnet must be in valid IPV6 CIDR format" + // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 125",message="subnet must be in the range /0 to /125 inclusive" // +optional InternalMasqueradeSubnet string `json:"internalMasqueradeSubnet,omitempty"` } diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go index d41982f2a..da3ce4e10 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go @@ -885,6 +885,7 @@ func (in *ConsoleSpec) DeepCopyInto(out *ConsoleSpec) { *out = make([]string, len(*in)) copy(*out, *in) } + out.Ingress = in.Ingress return } @@ -1846,6 +1847,22 @@ func (in *IPv6OVNKubernetesConfig) DeepCopy() *IPv6OVNKubernetesConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Ingress) DeepCopyInto(out *Ingress) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress. +func (in *Ingress) DeepCopy() *Ingress { + if in == nil { + return nil + } + out := new(Ingress) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IngressController) DeepCopyInto(out *IngressController) { *out = *in @@ -2990,7 +3007,13 @@ func (in *MachineConfigurationSpec) DeepCopy() *MachineConfigurationSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
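
With StaticPodOperatorStatus lifted out of MachineConfigurationStatus (see types_machineconfiguration.go above), the status surface is reduced to observedGeneration plus standard metav1 conditions keyed by type. A hedged sketch of how a controller might drive the new BootImageUpdate conditions with the stock apimachinery helper; the helper name, reason, and message are invented, and only the condition type constant comes from this patch:

	import (
		"k8s.io/apimachinery/pkg/api/meta"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

		operatorv1 "github.com/openshift/api/operator/v1"
	)

	// markBootImageProgressing records boot-image reconcile progress on a
	// MachineConfiguration; SetStatusCondition upserts by condition type.
	func markBootImageProgressing(mc *operatorv1.MachineConfiguration) {
		meta.SetStatusCondition(&mc.Status.Conditions, metav1.Condition{
			Type:               operatorv1.MachineConfigurationBootImageUpdateProgressing,
			Status:             metav1.ConditionTrue,
			Reason:             "UpdatingBootImages", // invented reason
			Message:            "reconciling boot images",
			ObservedGeneration: mc.Generation,
		})
	}
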
func (in *MachineConfigurationStatus) DeepCopyInto(out *MachineConfigurationStatus) { *out = *in - in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } in.NodeDisruptionPolicyStatus.DeepCopyInto(&out.NodeDisruptionPolicyStatus) return } diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml index 22992a02a..a8c2213cf 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml @@ -154,6 +154,7 @@ etcds.operator.openshift.io: Capability: "" Category: coreoperators FeatureGates: + - EtcdBackendQuota - HardwareSpeed FilenameOperatorName: etcd FilenameOperatorOrdering: "01" @@ -319,8 +320,7 @@ machineconfigurations.operator.openshift.io: Version: v1 networks.operator.openshift.io: - Annotations: - include.release.openshift.io/self-managed-high-availability: "true" + Annotations: {} ApprovedPRNumber: https://github.com/openshift/api/pull/475 CRDName: networks.operator.openshift.io Capability: "" diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go index 95017ec93..1b8b18e3f 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -265,6 +265,7 @@ var map_ConsoleSpec = map[string]string{ "providers": "providers contains configuration for using specific service providers.", "route": "route contains hostname and secret reference that contains the serving certificate. If a custom route is specified, a new route will be created with the provided hostname, under which console will be available. In case of custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed. In case of custom hostname points to an arbitrary domain, manual DNS configurations steps are necessary. The default console route will be maintained to reserve the default hostname for console if the custom route is removed. If not specified, default route will be used. DEPRECATED", "plugins": "plugins defines a list of enabled console plugin names.", + "ingress": "ingress allows to configure the alternative ingress for the console. This field is intended for clusters without ingress capability, where access to routes is not possible.", } func (ConsoleSpec) SwaggerDoc() map[string]string { @@ -320,6 +321,16 @@ func (DeveloperConsoleCatalogTypes) SwaggerDoc() map[string]string { return map_DeveloperConsoleCatalogTypes } +var map_Ingress = map[string]string{ + "": "Ingress allows cluster admin to configure alternative ingress for the console.", + "consoleURL": "consoleURL is a URL to be used as the base console address. If not specified, the console route hostname will be used. This field is required for clusters without ingress capability, where access to routes is not possible. Make sure that appropriate ingress is set up at this URL. The console operator will monitor the URL and may go degraded if it's unreachable for an extended period. 
Must use the HTTPS scheme.", + "clientDownloadsURL": "clientDownloadsURL is a URL to be used as the address to download client binaries. If not specified, the downloads route hostname will be used. This field is required for clusters without ingress capability, where access to routes is not possible. The console operator will monitor the URL and may go degraded if it's unreachable for an extended period. Must use the HTTPS scheme.", +} + +func (Ingress) SwaggerDoc() map[string]string { + return map_Ingress +} + var map_Perspective = map[string]string{ "": "Perspective defines a perspective that cluster admins want to show/hide in the perspective switcher dropdown", "id": "id defines the id of the perspective. Example: \"dev\", \"admin\". The available perspective ids can be found in the code snippet section next to the yaml editor. Incorrect or unknown ids will be ignored.", @@ -701,6 +712,7 @@ func (EtcdList) SwaggerDoc() map[string]string { var map_EtcdSpec = map[string]string{ "controlPlaneHardwareSpeed": "HardwareSpeed allows user to change the etcd tuning profile which configures the latency parameters for heartbeat interval and leader election timeouts allowing the cluster to tolerate longer round-trip-times between etcd members. Valid values are \"\", \"Standard\" and \"Slower\".\n\t\"\" means no opinion and the platform is left to choose a reasonable default\n\twhich is subject to change without notice.", + "backendQuotaGiB": "backendQuotaGiB sets the etcd backend storage size limit in gibibytes. The value should be an integer not less than 8 and not more than 32. When not specified, the default value is 8.", } func (EtcdSpec) SwaggerDoc() map[string]string { @@ -1288,6 +1300,8 @@ func (MachineConfigurationSpec) SwaggerDoc() map[string]string { } var map_MachineConfigurationStatus = map[string]string{ + "observedGeneration": "observedGeneration is the last generation change you've dealt with", + "conditions": "conditions is a list of conditions and their status", "nodeDisruptionPolicyStatus": "nodeDisruptionPolicyStatus status reflects what the latest cluster-validated policies are, and will be used by the Machine Config Daemon during future node updates.", } @@ -1346,7 +1360,7 @@ func (NodeDisruptionPolicyConfig) SwaggerDoc() map[string]string { } var map_NodeDisruptionPolicySpecAction = map[string]string{ - "type": "type represents the commands that will be carried out if this NodeDisruptionPolicySpecActionType is executed Valid value are Reboot, Drain, Reload, Restart, DaemonReload, None and Special reload/restart requires a corresponding service target specified in the reload/restart field. Other values require no further configuration", + "type": "type represents the commands that will be carried out if this NodeDisruptionPolicySpecActionType is executed Valid values are Reboot, Drain, Reload, Restart, DaemonReload and None. reload/restart requires a corresponding service target specified in the reload/restart field. 
Other values require no further configuration", "reload": "reload specifies the service to reload, only valid if type is reload", "restart": "restart specifies the service to restart, only valid if type is restart", } @@ -1393,7 +1407,7 @@ func (NodeDisruptionPolicyStatus) SwaggerDoc() map[string]string { } var map_NodeDisruptionPolicyStatusAction = map[string]string{ - "type": "type represents the commands that will be carried out if this NodeDisruptionPolicyStatusActionType is executed Valid value are Reboot, Drain, Reload, Restart, DaemonReload, None and Special reload/restart requires a corresponding service target specified in the reload/restart field. Other values require no further configuration", + "type": "type represents the commands that will be carried out if this NodeDisruptionPolicyStatusActionType is executed Valid values are Reboot, Drain, Reload, Restart, DaemonReload, None and Special. reload/restart requires a corresponding service target specified in the reload/restart field. Other values require no further configuration", "reload": "reload specifies the service to reload, only valid if type is reload", "restart": "restart specifies the service to restart, only valid if type is restart", } diff --git a/vendor/github.com/openshift/api/security/v1/generated.proto b/vendor/github.com/openshift/api/security/v1/generated.proto index c6d60915d..0f00758e5 100644 --- a/vendor/github.com/openshift/api/security/v1/generated.proto +++ b/vendor/github.com/openshift/api/security/v1/generated.proto @@ -210,6 +210,7 @@ message SELinuxContextStrategyOptions { // +kubebuilder:printcolumn:name="Volumes",type=string,JSONPath=.volumes,description="White list of allowed volume plugins" // +kubebuilder:singular=securitycontextconstraint // +openshift:compatibility-gen:level=1 +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true message SecurityContextConstraints { // metadata is the standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata diff --git a/vendor/github.com/openshift/api/security/v1/types.go b/vendor/github.com/openshift/api/security/v1/types.go index b57da3058..44db1cdd3 100644 --- a/vendor/github.com/openshift/api/security/v1/types.go +++ b/vendor/github.com/openshift/api/security/v1/types.go @@ -37,6 +37,7 @@ var AllowAllCapabilities corev1.Capability = "*" // +kubebuilder:printcolumn:name="Volumes",type=string,JSONPath=.volumes,description="White list of allowed volume plugins" // +kubebuilder:singular=securitycontextconstraint // +openshift:compatibility-gen:level=1 +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true type SecurityContextConstraints struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/security/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/security/v1/zz_generated.featuregated-crd-manifests.yaml index ea3967ab2..86f78058a 100644 --- a/vendor/github.com/openshift/api/security/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/security/v1/zz_generated.featuregated-crd-manifests.yaml @@ -1,5 +1,6 @@ securitycontextconstraints.security.openshift.io: - Annotations: {} + Annotations: + release.openshift.io/bootstrap-required: "true" ApprovedPRNumber: https://github.com/openshift/api/pull/470 CRDName: securitycontextconstraints.security.openshift.io Capability: "" diff --git a/vendor/github.com/openshift/hypershift/api/LICENSE b/vendor/github.com/openshift/hypershift/api/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/openshift/hypershift/api/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/agent.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/agent.go new file mode 100644 index 000000000..ccbd1a64c --- /dev/null +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/agent.go @@ -0,0 +1,18 @@ +package v1beta1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// AgentNodePoolPlatform specifies the configuration of a NodePool when operating +// on the Agent platform. +type AgentNodePoolPlatform struct { + // AgentLabelSelector contains labels that must be set on an Agent in order to + // be selected for a Machine. + // +optional + AgentLabelSelector *metav1.LabelSelector `json:"agentLabelSelector,omitempty"` +} + +// AgentPlatformSpec specifies configuration for agent-based installations. 
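+//
+// An illustrative sketch (hypothetical namespace, not upstream documentation):
+//
+//	spec:
+//	  platform:
+//	    agent:
+//	      agentNamespace: my-agents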
+type AgentPlatformSpec struct {
+	// AgentNamespace is the namespace in which to search for Agents for this cluster
+	AgentNamespace string `json:"agentNamespace"`
+}
diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/aws.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/aws.go
new file mode 100644
index 000000000..5b9b74b2a
--- /dev/null
+++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/aws.go
@@ -0,0 +1,864 @@
+package v1beta1
+
+// AWSNodePoolPlatform specifies the configuration of a NodePool when operating
+// on AWS.
+type AWSNodePoolPlatform struct {
+	// InstanceType is an ec2 instance type for node instances (e.g. m5.large).
+	InstanceType string `json:"instanceType"`
+
+	// InstanceProfile is the AWS EC2 instance profile, which is a container for an IAM role that the EC2 instance uses.
+	InstanceProfile string `json:"instanceProfile,omitempty"`
+
+	// +kubebuilder:validation:XValidation:rule="has(self.id) && self.id.startsWith('subnet-') ? !has(self.filters) : size(self.filters) > 0", message="subnet is invalid, a valid subnet id or filters must be set, but not both"
+	// +kubebuilder:validation:Required
+	//
+	// Subnet is the subnet to use for node instances.
+	Subnet AWSResourceReference `json:"subnet"`
+
+	// AMI is the image id to use for node instances. If unspecified, the default
+	// is chosen based on the NodePool release payload image.
+	//
+	// +optional
+	AMI string `json:"ami,omitempty"`
+
+	// SecurityGroups is an optional set of security groups to associate with node
+	// instances.
+	//
+	// +optional
+	SecurityGroups []AWSResourceReference `json:"securityGroups,omitempty"`
+
+	// RootVolume specifies configuration for the root volume of node instances.
+	//
+	// +optional
+	RootVolume *Volume `json:"rootVolume,omitempty"`
+
+	// ResourceTags is an optional list of additional tags to apply to AWS node
+	// instances.
+	//
+	// These will be merged with HostedCluster scoped tags, and HostedCluster tags
+	// take precedence in case of conflicts.
+	//
+	// See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for
+	// information on tagging AWS resources. AWS supports a maximum of 50 tags per
+	// resource. OpenShift reserves 25 tags for its use, leaving 25 tags available
+	// for the user.
+	//
+	// +kubebuilder:validation:MaxItems=25
+	// +optional
+	ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"`
+
+	// placement specifies the placement options for the EC2 instances.
+	//
+	// +optional
+	Placement *PlacementOptions `json:"placement,omitempty"`
+}
+
+// PlacementOptions specifies the placement options for the EC2 instances.
+type PlacementOptions struct {
+	// Tenancy indicates if the instance should run on shared or single-tenant hardware.
+	//
+	// Possible values:
+	// default: NodePool instances run on shared hardware.
+	// dedicated: Each NodePool instance runs on single-tenant hardware.
+	// host: NodePool instances run on user's pre-allocated dedicated hosts.
+	//
+	// +optional
+	// +kubebuilder:validation:Enum:=default;dedicated;host
+	Tenancy string `json:"tenancy,omitempty"`
+}
+
+// AWSResourceReference is a reference to a specific AWS resource by ID or filters.
+// Only one of ID or Filters may be specified. Specifying more than one will result in
+// a validation error.
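+//
+// For illustration (hypothetical values), a subnet can be referenced either by
+// ID:
+//
+//	subnet:
+//	  id: subnet-0fedcba9876543210
+//
+// or by filters:
+//
+//	subnet:
+//	  filters:
+//	  - name: tag:Name
+//	    values:
+//	    - my-worker-subnet
+//
+// but setting both id and filters fails validation.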
+type AWSResourceReference struct { + // ID of resource + // +optional + ID *string `json:"id,omitempty"` + + // Filters is a set of key/value pairs used to identify a resource + // They are applied according to the rules defined by the AWS API: + // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html + // +optional + Filters []Filter `json:"filters,omitempty"` +} + +// Filter is a filter used to identify an AWS resource +type Filter struct { + // Name of the filter. Filter names are case-sensitive. + Name string `json:"name"` + + // Values includes one or more filter values. Filter values are case-sensitive. + Values []string `json:"values"` +} + +// Volume specifies the configuration options for node instance storage devices. +type Volume struct { + // Size specifies size (in Gi) of the storage device. + // + // Must be greater than the image snapshot size or 8 (whichever is greater). + // + // +kubebuilder:validation:Minimum=8 + Size int64 `json:"size"` + + // Type is the type of the volume. + Type string `json:"type"` + + // IOPS is the number of IOPS requested for the disk. This is only valid + // for type io1. + // + // +optional + IOPS int64 `json:"iops,omitempty"` + + // Encrypted is whether the volume should be encrypted or not. + // +optional + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Encrypted is immutable" + Encrypted *bool `json:"encrypted,omitempty"` + + // EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. + // If Encrypted is set and this is omitted, the default AWS key will be used. + // The key must already exist and be accessible by the controller. + // +optional + EncryptionKey string `json:"encryptionKey,omitempty"` +} + +// AWSCloudProviderConfig specifies AWS networking configuration. +type AWSCloudProviderConfig struct { + // Subnet is the subnet to use for control plane cloud resources. + // + // +optional + Subnet *AWSResourceReference `json:"subnet,omitempty"` + + // Zone is the availability zone where control plane cloud resources are + // created. + // + // +optional + Zone string `json:"zone,omitempty"` + + // VPC is the VPC to use for control plane cloud resources. + VPC string `json:"vpc"` +} + +// AWSEndpointAccessType specifies the publishing scope of cluster endpoints. +type AWSEndpointAccessType string + +const ( + // Public endpoint access allows public API server access and public node + // communication with the control plane. + Public AWSEndpointAccessType = "Public" + + // PublicAndPrivate endpoint access allows public API server access and + // private node communication with the control plane. + PublicAndPrivate AWSEndpointAccessType = "PublicAndPrivate" + + // Private endpoint access allows only private API server access and private + // node communication with the control plane. + Private AWSEndpointAccessType = "Private" +) + +// AWSPlatformSpec specifies configuration for clusters running on Amazon Web Services. +type AWSPlatformSpec struct { + // Region is the AWS region in which the cluster resides. This configures the + // OCP control plane cloud integrations, and is used by NodePool to resolve + // the correct boot AMI for a given release. + // + // +immutable + Region string `json:"region"` + + // CloudProviderConfig specifies AWS networking configuration for the control + // plane. 
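+	// For illustration only (hypothetical values, not an upstream example), a
+	// populated config might look like:
+	//
+	//	cloudProviderConfig:
+	//	  vpc: vpc-0123456789abcdef0
+	//	  zone: us-east-1a
+	//	  subnet:
+	//	    id: subnet-0123456789abcdef0
+	//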
+	// This is mainly used for cloud provider controller config:
+	// https://github.com/kubernetes/kubernetes/blob/f5be5052e3d0808abb904aebd3218fe4a5c2dd82/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go#L1347-L1364
+	// TODO(dan): should this be named AWSNetworkConfig?
+	//
+	// +optional
+	// +immutable
+	CloudProviderConfig *AWSCloudProviderConfig `json:"cloudProviderConfig,omitempty"`
+
+	// ServiceEndpoints specifies optional custom endpoints which will override
+	// the default service endpoint of specific AWS Services.
+	//
+	// There must be only one ServiceEndpoint for a given service name.
+	//
+	// +optional
+	// +immutable
+	ServiceEndpoints []AWSServiceEndpoint `json:"serviceEndpoints,omitempty"`
+
+	// RolesRef contains references to various AWS IAM roles required to enable
+	// integrations such as OIDC.
+	//
+	// +immutable
+	RolesRef AWSRolesRef `json:"rolesRef"`
+
+	// ResourceTags is a list of additional tags to apply to AWS resources created
+	// for the cluster. See
+	// https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for
+	// information on tagging AWS resources. AWS supports a maximum of 50 tags per
+	// resource. OpenShift reserves 25 tags for its use, leaving 25 tags available
+	// for the user.
+	//
+	// +kubebuilder:validation:MaxItems=25
+	// +optional
+	ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"`
+
+	// EndpointAccess specifies the publishing scope of cluster endpoints. The
+	// default is Public.
+	//
+	// +kubebuilder:validation:Enum=Public;PublicAndPrivate;Private
+	// +kubebuilder:default=Public
+	// +optional
+	EndpointAccess AWSEndpointAccessType `json:"endpointAccess,omitempty"`
+
+	// AdditionalAllowedPrincipals specifies a list of additional allowed principal ARNs
+	// to be added to the hosted control plane's VPC Endpoint Service to enable additional
+	// VPC Endpoint connection requests to be automatically accepted.
+	// See https://docs.aws.amazon.com/vpc/latest/privatelink/configure-endpoint-service.html
+	// for more details around VPC Endpoint Service allowed principals.
+	//
+	// +optional
+	AdditionalAllowedPrincipals []string `json:"additionalAllowedPrincipals,omitempty"`
+
+	// MultiArch specifies whether the Hosted Cluster will be expected to support NodePools with different
+	// CPU architectures, i.e., supporting arm64 NodePools and supporting amd64 NodePools on the same Hosted Cluster.
+	// Deprecated: This field is no longer used. The HyperShift Operator now performs multi-arch validations
+	// automatically regardless of the platform type. The HyperShift Operator will set HostedCluster.Status.PayloadArch based
+	// on the HostedCluster release image. This field is used by the NodePool controller to validate that
+	// NodePool.Spec.Arch is supported.
+	// +kubebuilder:default=false
+	// +optional
+	MultiArch bool `json:"multiArch"`
+
+	// SharedVPC contains fields that must be specified if the HostedCluster must use a VPC that is
+	// created in a different AWS account and is shared with the AWS account where the HostedCluster
+	// will be created.
+	//
+	// +optional
+	SharedVPC *AWSSharedVPC `json:"sharedVPC,omitempty"`
+}
+
+// AWSSharedVPC contains fields needed to create a HostedCluster using a VPC that has been
+// created and shared from a different AWS account than the AWS account where the cluster
+// is getting created.
+type AWSSharedVPC struct {
+
+	// RolesRef contains references to roles in the VPC owner account that enable a
+	// HostedCluster on a shared VPC.
+	//
+	// +kubebuilder:validation:Required
+	// +required
+	RolesRef AWSSharedVPCRolesRef `json:"rolesRef"`
+
+	// LocalZoneID is the ID of the route53 hosted zone for [cluster-name].hypershift.local that is
+	// associated with the HostedCluster's VPC and exists in the VPC owner account.
+	//
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:MaxLength=32
+	// +required
+	LocalZoneID string `json:"localZoneID"`
+}
+
+type AWSRoleCredentials struct {
+	ARN       string `json:"arn"`
+	Namespace string `json:"namespace"`
+	Name      string `json:"name"`
+}
+
+// AWSResourceTag is a tag to apply to AWS resources created for the cluster.
+type AWSResourceTag struct {
+	// Key is the key of the tag.
+	//
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=128
+	// +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$`
+	Key string `json:"key"`
+	// Value is the value of the tag.
+	//
+	// Some AWS services do not support empty values. Since tags are added to
+	// resources in many services, the length of the tag value must meet the
+	// requirements of all services.
+	//
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=256
+	// +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$`
+	Value string `json:"value"`
+}
+
+// AWSRolesRef contains references to various AWS IAM roles required for operators to make calls against the AWS API.
+type AWSRolesRef struct {
+	// The referenced role must have a trust relationship that allows it to be assumed via web identity.
+	// https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc.html.
+	// Example:
+	// {
+	//	"Version": "2012-10-17",
+	//	"Statement": [
+	//		{
+	//			"Effect": "Allow",
+	//			"Principal": {
+	//				"Federated": "{{ .ProviderARN }}"
+	//			},
+	//			"Action": "sts:AssumeRoleWithWebIdentity",
+	//			"Condition": {
+	//				"StringEquals": {
+	//					"{{ .ProviderName }}:sub": {{ .ServiceAccounts }}
+	//				}
+	//			}
+	//		}
+	//	]
+	// }
+	//
+	// IngressARN is an ARN value referencing a role appropriate for the Ingress Operator.
+	//
+	// The following is an example of a valid policy document:
+	//
+	// {
+	//	"Version": "2012-10-17",
+	//	"Statement": [
+	//		{
+	//			"Effect": "Allow",
+	//			"Action": [
+	//				"elasticloadbalancing:DescribeLoadBalancers",
+	//				"tag:GetResources",
+	//				"route53:ListHostedZones"
+	//			],
+	//			"Resource": "*"
+	//		},
+	//		{
+	//			"Effect": "Allow",
+	//			"Action": [
+	//				"route53:ChangeResourceRecordSets"
+	//			],
+	//			"Resource": [
+	//				"arn:aws:route53:::PUBLIC_ZONE_ID",
+	//				"arn:aws:route53:::PRIVATE_ZONE_ID"
+	//			]
+	//		}
+	//	]
+	// }
+	IngressARN string `json:"ingressARN"`
+
+	// ImageRegistryARN is an ARN value referencing a role appropriate for the Image Registry Operator.
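+	// (Illustratively, the value is a plain IAM role ARN such as
+	// arn:aws:iam::123456789012:role/my-image-registry-role; the account ID and
+	// role name here are hypothetical.)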
+ // + // The following is an example of a valid policy document: + // + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Effect": "Allow", + // "Action": [ + // "s3:CreateBucket", + // "s3:DeleteBucket", + // "s3:PutBucketTagging", + // "s3:GetBucketTagging", + // "s3:PutBucketPublicAccessBlock", + // "s3:GetBucketPublicAccessBlock", + // "s3:PutEncryptionConfiguration", + // "s3:GetEncryptionConfiguration", + // "s3:PutLifecycleConfiguration", + // "s3:GetLifecycleConfiguration", + // "s3:GetBucketLocation", + // "s3:ListBucket", + // "s3:GetObject", + // "s3:PutObject", + // "s3:DeleteObject", + // "s3:ListBucketMultipartUploads", + // "s3:AbortMultipartUpload", + // "s3:ListMultipartUploadParts" + // ], + // "Resource": "*" + // } + // ] + // } + ImageRegistryARN string `json:"imageRegistryARN"` + + // StorageARN is an ARN value referencing a role appropriate for the Storage Operator. + // + // The following is an example of a valid policy document: + // + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Effect": "Allow", + // "Action": [ + // "ec2:AttachVolume", + // "ec2:CreateSnapshot", + // "ec2:CreateTags", + // "ec2:CreateVolume", + // "ec2:DeleteSnapshot", + // "ec2:DeleteTags", + // "ec2:DeleteVolume", + // "ec2:DescribeInstances", + // "ec2:DescribeSnapshots", + // "ec2:DescribeTags", + // "ec2:DescribeVolumes", + // "ec2:DescribeVolumesModifications", + // "ec2:DetachVolume", + // "ec2:ModifyVolume" + // ], + // "Resource": "*" + // } + // ] + // } + StorageARN string `json:"storageARN"` + + // NetworkARN is an ARN value referencing a role appropriate for the Network Operator. + // + // The following is an example of a valid policy document: + // + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Effect": "Allow", + // "Action": [ + // "ec2:DescribeInstances", + // "ec2:DescribeInstanceStatus", + // "ec2:DescribeInstanceTypes", + // "ec2:UnassignPrivateIpAddresses", + // "ec2:AssignPrivateIpAddresses", + // "ec2:UnassignIpv6Addresses", + // "ec2:AssignIpv6Addresses", + // "ec2:DescribeSubnets", + // "ec2:DescribeNetworkInterfaces" + // ], + // "Resource": "*" + // } + // ] + // } + NetworkARN string `json:"networkARN"` + + // KubeCloudControllerARN is an ARN value referencing a role appropriate for the KCM/KCC. 
+ // Source: https://cloud-provider-aws.sigs.k8s.io/prerequisites/#iam-policies + // + // The following is an example of a valid policy document: + // + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Action": [ + // "autoscaling:DescribeAutoScalingGroups", + // "autoscaling:DescribeLaunchConfigurations", + // "autoscaling:DescribeTags", + // "ec2:DescribeAvailabilityZones", + // "ec2:DescribeInstances", + // "ec2:DescribeImages", + // "ec2:DescribeRegions", + // "ec2:DescribeRouteTables", + // "ec2:DescribeSecurityGroups", + // "ec2:DescribeSubnets", + // "ec2:DescribeVolumes", + // "ec2:CreateSecurityGroup", + // "ec2:CreateTags", + // "ec2:CreateVolume", + // "ec2:ModifyInstanceAttribute", + // "ec2:ModifyVolume", + // "ec2:AttachVolume", + // "ec2:AuthorizeSecurityGroupIngress", + // "ec2:CreateRoute", + // "ec2:DeleteRoute", + // "ec2:DeleteSecurityGroup", + // "ec2:DeleteVolume", + // "ec2:DetachVolume", + // "ec2:RevokeSecurityGroupIngress", + // "ec2:DescribeVpcs", + // "elasticloadbalancing:AddTags", + // "elasticloadbalancing:AttachLoadBalancerToSubnets", + // "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + // "elasticloadbalancing:CreateLoadBalancer", + // "elasticloadbalancing:CreateLoadBalancerPolicy", + // "elasticloadbalancing:CreateLoadBalancerListeners", + // "elasticloadbalancing:ConfigureHealthCheck", + // "elasticloadbalancing:DeleteLoadBalancer", + // "elasticloadbalancing:DeleteLoadBalancerListeners", + // "elasticloadbalancing:DescribeLoadBalancers", + // "elasticloadbalancing:DescribeLoadBalancerAttributes", + // "elasticloadbalancing:DetachLoadBalancerFromSubnets", + // "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + // "elasticloadbalancing:ModifyLoadBalancerAttributes", + // "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + // "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + // "elasticloadbalancing:AddTags", + // "elasticloadbalancing:CreateListener", + // "elasticloadbalancing:CreateTargetGroup", + // "elasticloadbalancing:DeleteListener", + // "elasticloadbalancing:DeleteTargetGroup", + // "elasticloadbalancing:DeregisterTargets", + // "elasticloadbalancing:DescribeListeners", + // "elasticloadbalancing:DescribeLoadBalancerPolicies", + // "elasticloadbalancing:DescribeTargetGroups", + // "elasticloadbalancing:DescribeTargetHealth", + // "elasticloadbalancing:ModifyListener", + // "elasticloadbalancing:ModifyTargetGroup", + // "elasticloadbalancing:RegisterTargets", + // "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", + // "iam:CreateServiceLinkedRole", + // "kms:DescribeKey" + // ], + // "Resource": [ + // "*" + // ], + // "Effect": "Allow" + // } + // ] + // } + // +immutable + KubeCloudControllerARN string `json:"kubeCloudControllerARN"` + + // NodePoolManagementARN is an ARN value referencing a role appropriate for the CAPI Controller. 
+ // + // The following is an example of a valid policy document: + // + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Action": [ + // "ec2:AssociateRouteTable", + // "ec2:AttachInternetGateway", + // "ec2:AuthorizeSecurityGroupIngress", + // "ec2:CreateInternetGateway", + // "ec2:CreateNatGateway", + // "ec2:CreateRoute", + // "ec2:CreateRouteTable", + // "ec2:CreateSecurityGroup", + // "ec2:CreateSubnet", + // "ec2:CreateTags", + // "ec2:DeleteInternetGateway", + // "ec2:DeleteNatGateway", + // "ec2:DeleteRouteTable", + // "ec2:DeleteSecurityGroup", + // "ec2:DeleteSubnet", + // "ec2:DeleteTags", + // "ec2:DescribeAccountAttributes", + // "ec2:DescribeAddresses", + // "ec2:DescribeAvailabilityZones", + // "ec2:DescribeImages", + // "ec2:DescribeInstances", + // "ec2:DescribeInternetGateways", + // "ec2:DescribeNatGateways", + // "ec2:DescribeNetworkInterfaces", + // "ec2:DescribeNetworkInterfaceAttribute", + // "ec2:DescribeRouteTables", + // "ec2:DescribeSecurityGroups", + // "ec2:DescribeSubnets", + // "ec2:DescribeVpcs", + // "ec2:DescribeVpcAttribute", + // "ec2:DescribeVolumes", + // "ec2:DetachInternetGateway", + // "ec2:DisassociateRouteTable", + // "ec2:DisassociateAddress", + // "ec2:ModifyInstanceAttribute", + // "ec2:ModifyNetworkInterfaceAttribute", + // "ec2:ModifySubnetAttribute", + // "ec2:RevokeSecurityGroupIngress", + // "ec2:RunInstances", + // "ec2:TerminateInstances", + // "tag:GetResources", + // "ec2:CreateLaunchTemplate", + // "ec2:CreateLaunchTemplateVersion", + // "ec2:DescribeLaunchTemplates", + // "ec2:DescribeLaunchTemplateVersions", + // "ec2:DeleteLaunchTemplate", + // "ec2:DeleteLaunchTemplateVersions" + // ], + // "Resource": [ + // "*" + // ], + // "Effect": "Allow" + // }, + // { + // "Condition": { + // "StringLike": { + // "iam:AWSServiceName": "elasticloadbalancing.amazonaws.com" + // } + // }, + // "Action": [ + // "iam:CreateServiceLinkedRole" + // ], + // "Resource": [ + // "arn:*:iam::*:role/aws-service-role/elasticloadbalancing.amazonaws.com/AWSServiceRoleForElasticLoadBalancing" + // ], + // "Effect": "Allow" + // }, + // { + // "Action": [ + // "iam:PassRole" + // ], + // "Resource": [ + // "arn:*:iam::*:role/*-worker-role" + // ], + // "Effect": "Allow" + // }, + // { + // "Effect": "Allow", + // "Action": [ + // "kms:Decrypt", + // "kms:ReEncrypt", + // "kms:GenerateDataKeyWithoutPlainText", + // "kms:DescribeKey" + // ], + // "Resource": "*" + // }, + // { + // "Effect": "Allow", + // "Action": [ + // "kms:CreateGrant" + // ], + // "Resource": "*", + // "Condition": { + // "Bool": { + // "kms:GrantIsForAWSResource": true + // } + // } + // } + // ] + // } + // + // +immutable + NodePoolManagementARN string `json:"nodePoolManagementARN"` + + // ControlPlaneOperatorARN is an ARN value referencing a role appropriate for the Control Plane Operator. 
+	//
+	// The following is an example of a valid policy document:
+	//
+	// {
+	//	"Version": "2012-10-17",
+	//	"Statement": [
+	//		{
+	//			"Effect": "Allow",
+	//			"Action": [
+	//				"ec2:CreateVpcEndpoint",
+	//				"ec2:DescribeVpcEndpoints",
+	//				"ec2:ModifyVpcEndpoint",
+	//				"ec2:DeleteVpcEndpoints",
+	//				"ec2:CreateTags",
+	//				"route53:ListHostedZones",
+	//				"ec2:CreateSecurityGroup",
+	//				"ec2:AuthorizeSecurityGroupIngress",
+	//				"ec2:AuthorizeSecurityGroupEgress",
+	//				"ec2:DeleteSecurityGroup",
+	//				"ec2:RevokeSecurityGroupIngress",
+	//				"ec2:RevokeSecurityGroupEgress",
+	//				"ec2:DescribeSecurityGroups",
+	//				"ec2:DescribeVpcs"
+	//			],
+	//			"Resource": "*"
+	//		},
+	//		{
+	//			"Effect": "Allow",
+	//			"Action": [
+	//				"route53:ChangeResourceRecordSets",
+	//				"route53:ListResourceRecordSets"
+	//			],
+	//			"Resource": "arn:aws:route53:::%s"
+	//		}
+	//	]
+	// }
+	// +immutable
+	ControlPlaneOperatorARN string `json:"controlPlaneOperatorARN"`
+}
+
+// AWSSharedVPCRolesRef contains references to AWS IAM roles required for a shared VPC hosted cluster.
+// These roles must exist in the VPC owner's account.
+type AWSSharedVPCRolesRef struct {
+	// IngressARN is an ARN value referencing the role in the VPC owner account that allows the
+	// ingress operator in the cluster account to create and manage records in the private DNS
+	// hosted zone.
+	//
+	// The referenced role must have a trust relationship that allows it to be assumed by the
+	// ingress operator role in the VPC creator account.
+	// Example:
+	// {
+	//	"Version": "2012-10-17",
+	//	"Statement": [
+	//		{
+	//			"Sid": "Statement1",
+	//			"Effect": "Allow",
+	//			"Principal": {
+	//				"AWS": "arn:aws:iam::[cluster-creator-account-id]:role/[infra-id]-openshift-ingress"
+	//			},
+	//			"Action": "sts:AssumeRole"
+	//		}
+	//	]
+	// }
+	//
+	// The following is an example of the policy document for this role.
+	// (Based on https://docs.openshift.com/rosa/rosa_install_access_delete_clusters/rosa-shared-vpc-config.html#rosa-sharing-vpc-dns-and-roles_rosa-shared-vpc-config)
+	//
+	// {
+	//	"Version": "2012-10-17",
+	//	"Statement": [
+	//		{
+	//			"Effect": "Allow",
+	//			"Action": [
+	//				"route53:ListHostedZones",
+	//				"route53:ListHostedZonesByName",
+	//				"route53:ChangeTagsForResource",
+	//				"route53:GetAccountLimit",
+	//				"route53:GetChange",
+	//				"route53:GetHostedZone",
+	//				"route53:ListTagsForResource",
+	//				"route53:UpdateHostedZoneComment",
+	//				"tag:GetResources",
+	//				"tag:UntagResources",
+	//				"route53:ChangeResourceRecordSets",
+	//				"route53:ListResourceRecordSets"
+	//			],
+	//			"Resource": "*"
+	//		}
+	//	]
+	// }
+	//
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Pattern:=`^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\/.*$`
+	// +required
+	IngressARN string `json:"ingressARN"`
+
+	// ControlPlaneARN is an ARN value referencing the role in the VPC owner account that allows
+	// the control plane operator in the cluster account to create and manage a VPC endpoint, its
+	// corresponding Security Group, and DNS records in the hypershift local hosted zone.
+	//
+	// The referenced role must have a trust relationship that allows it to be assumed by the
+	// control plane operator role in the VPC creator account.
+ // Example: + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Sid": "Statement1", + // "Effect": "Allow", + // "Principal": { + // "AWS": "arn:aws:iam::[cluster-creator-account-id]:role/[infra-id]-control-plane-operator" + // }, + // "Action": "sts:AssumeRole" + // } + // ] + // } + // + // The following is an example of the policy document for this role. + // + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Effect": "Allow", + // "Action": [ + // "ec2:CreateVpcEndpoint", + // "ec2:DescribeVpcEndpoints", + // "ec2:ModifyVpcEndpoint", + // "ec2:DeleteVpcEndpoints", + // "ec2:CreateTags", + // "route53:ListHostedZones", + // "ec2:CreateSecurityGroup", + // "ec2:AuthorizeSecurityGroupIngress", + // "ec2:AuthorizeSecurityGroupEgress", + // "ec2:DeleteSecurityGroup", + // "ec2:RevokeSecurityGroupIngress", + // "ec2:RevokeSecurityGroupEgress", + // "ec2:DescribeSecurityGroups", + // "ec2:DescribeVpcs", + // "route53:ChangeResourceRecordSets", + // "route53:ListResourceRecordSets" + // ], + // "Resource": "*" + // } + // ] + // } + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern:=`^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\/.*$` + // +required + ControlPlaneARN string `json:"controlPlaneARN"` +} + +// AWSServiceEndpoint stores the configuration for services to +// override existing defaults of AWS Services. +type AWSServiceEndpoint struct { + // Name is the name of the AWS service. + // This must be provided and cannot be empty. + Name string `json:"name"` + + // URL is fully qualified URI with scheme https, that overrides the default generated + // endpoint for a client. + // This must be provided and cannot be empty. + // + // +kubebuilder:validation:Pattern=`^https://` + URL string `json:"url"` +} + +// AWSKMSSpec defines metadata about the configuration of the AWS KMS Secret Encryption provider +type AWSKMSSpec struct { + // Region contains the AWS region + Region string `json:"region"` + // ActiveKey defines the active key used to encrypt new secrets + ActiveKey AWSKMSKeyEntry `json:"activeKey"` + // BackupKey defines the old key during the rotation process so previously created + // secrets can continue to be decrypted until they are all re-encrypted with the active key. + // +optional + BackupKey *AWSKMSKeyEntry `json:"backupKey,omitempty"` + // Auth defines metadata about the management of credentials used to interact with AWS KMS + Auth AWSKMSAuthSpec `json:"auth"` +} + +// AWSKMSAuthSpec defines metadata about the management of credentials used to interact and encrypt data via AWS KMS key. +type AWSKMSAuthSpec struct { + // The referenced role must have a trust relationship that allows it to be assumed via web identity. + // https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc.html. + // Example: + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Effect": "Allow", + // "Principal": { + // "Federated": "{{ .ProviderARN }}" + // }, + // "Action": "sts:AssumeRoleWithWebIdentity", + // "Condition": { + // "StringEquals": { + // "{{ .ProviderName }}:sub": {{ .ServiceAccounts }} + // } + // } + // } + // ] + // } + // + // AWSKMSARN is an ARN value referencing a role appropriate for managing the auth via the AWS KMS key. 
+	//
+	// The following is an example of a valid policy document:
+	//
+	// {
+	//	"Version": "2012-10-17",
+	//	"Statement": [
+	//		{
+	//			"Effect": "Allow",
+	//			"Action": [
+	//				"kms:Encrypt",
+	//				"kms:Decrypt",
+	//				"kms:ReEncrypt*",
+	//				"kms:GenerateDataKey*",
+	//				"kms:DescribeKey"
+	//			],
+	//			"Resource": %q
+	//		}
+	//	]
+	// }
+	AWSKMSRoleARN string `json:"awsKms"`
+}
+
+// AWSKMSKeyEntry defines metadata to locate the encryption key in AWS
+type AWSKMSKeyEntry struct {
+	// ARN is the Amazon Resource Name for the encryption key
+	// +kubebuilder:validation:Pattern=`^arn:`
+	ARN string `json:"arn"`
+}
+
+// AWSPlatformStatus contains status specific to the AWS platform
+type AWSPlatformStatus struct {
+	// DefaultWorkerSecurityGroupID is the ID of a security group created by
+	// the control plane operator. It is always added to worker machines in
+	// addition to any security groups specified in the NodePool.
+	// +optional
+	DefaultWorkerSecurityGroupID string `json:"defaultWorkerSecurityGroupID,omitempty"`
+}
diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/azure.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/azure.go
new file mode 100644
index 000000000..f27fcd2e9
--- /dev/null
+++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/azure.go
@@ -0,0 +1,565 @@
+package v1beta1
+
+import (
+	"fmt"
+
+	corev1 "k8s.io/api/core/v1"
+)
+
+// AzureVMImageType is used to specify the source of the Azure VM boot image.
+// Valid values are ImageID and AzureMarketplace.
+// +kubebuilder:validation:Enum:=ImageID;AzureMarketplace
+type AzureVMImageType string
+
+const (
+	// ImageID is used to specify that an Azure resource ID of a VHD image is used to boot the Azure VMs from.
+	ImageID AzureVMImageType = "ImageID"
+
+	// AzureMarketplace is used to specify the Azure Marketplace image info to use to boot the Azure VMs from.
+	AzureMarketplace AzureVMImageType = "AzureMarketplace"
+)
+
+// AzureNodePoolPlatform is the platform specific configuration for an Azure node pool.
+type AzureNodePoolPlatform struct {
+	// vmSize is the Azure VM instance type to use for the nodes being created in the nodepool.
+	// The size naming convention is documented here https://learn.microsoft.com/en-us/azure/virtual-machines/vm-naming-conventions.
+	// Size names should start with a Family name, which is represented by one or more capital letters, and then be followed by the CPU count.
+	// This is followed by 0 or more additional features, represented by a, b, d, i, l, m, p, t, s, C, and NP; refer to the Azure documentation for an explanation of these features.
+	// Optionally an accelerator such as a GPU can be added, prefixed by an underscore, for example A100, H100 or MI300X.
+	// The size may also be versioned, in which case it should be suffixed with _v where the version is a number.
+	// For example, "D32ads_v5" would be a suitable general purpose VM size, or "ND96_MI300X_v5" would represent a GPU accelerated VM.
+	//
+	// +kubebuilder:validation:Pattern=`^(Standard_|Basic_)?[A-Z]+[0-9]+(-[0-9]+)?[abdilmptsCNP]*(_[A-Z]*[0-9]+[A-Z]*)?(_v[0-9]+)?$`
+	// +kubebuilder:validation:Required
+	// + Azure VM size format described in https://learn.microsoft.com/en-us/azure/virtual-machines/vm-naming-conventions
+	// + "[A-Z]+[0-9]+(-[0-9]+)?" - Series, size and constrained CPU size
+	// + "[abdilmptsCNP]*" - Additive features
+	// + "(_[A-Z]*[0-9]+[A-Z]*)?" - Optional accelerator types
+	VMSize string `json:"vmSize"`
+
+	// image is used to configure the VM boot image. If unset, the default image at the location below will be used and
+	// is expected to exist: subscription/<subscriptionID>/resourceGroups/<resourceGroupName>/providers/Microsoft.Compute/images/rhcos.x86_64.vhd.
+	// The <subscriptionID> and the <resourceGroupName> are expected to match the values documented in the
+	// Hosted Cluster specification, HostedCluster.Spec.Platform.Azure.SubscriptionID and
+	// HostedCluster.Spec.Platform.Azure.ResourceGroupName respectively.
+	//
+	// +kubebuilder:validation:Required
+	Image AzureVMImage `json:"image"`
+
+	// osDisk provides configuration for the OS disk for the nodepool.
+	// This can be used to configure the size, storage account type, encryption options and whether the disk is persistent or ephemeral.
+	// When not provided, the platform will choose reasonable defaults which are subject to change over time.
+	// Review the fields within the osDisk for more details.
+	OSDisk AzureNodePoolOSDisk `json:"osDisk"`
+
+	// availabilityZone is the failure domain identifier where the VM should be attached to. This must not be specified
+	// for clusters in a location that does not support Availability Zones because it would cause a failure from the Azure API.
+	//kubebuilder:validation:XValidation:rule='availabilityZone in ["1", "2", "3"]'
+	// +optional
+	AvailabilityZone string `json:"availabilityZone,omitempty"`
+
+	// encryptionAtHost enables encryption at host on virtual machines. According to Microsoft documentation, this
+	// means data stored on the VM host is encrypted at rest and flows encrypted to the Storage service. See
+	// https://learn.microsoft.com/en-us/azure/virtual-machines/disks-enable-host-based-encryption-portal?tabs=azure-powershell
+	// for more information.
+	//
+	// +kubebuilder:default:=Enabled
+	// +kubebuilder:validation:Enum=Enabled;Disabled
+	// +optional
+	EncryptionAtHost string `json:"encryptionAtHost,omitempty"`
+
+	// subnetID is the subnet ID of an existing subnet where the nodes in the nodepool will be created. This can be a
+	// different subnet than the one listed in the HostedCluster, HostedCluster.Spec.Platform.Azure.SubnetID, but must
+	// exist in the same network, HostedCluster.Spec.Platform.Azure.VnetID, and must exist under the same subscription ID,
+	// HostedCluster.Spec.Platform.Azure.SubscriptionID.
+	// subnetID is immutable once set.
+	// The subnetID should be in the format `/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}`.
+	// The subscriptionId in the subnetID must be a valid UUID. It should be 5 groups of hyphen separated hexadecimal characters in the form 8-4-4-4-12.
+	// The resourceGroupName should be between 1 and 90 characters, consisting only of alphanumeric characters, hyphens, underscores, periods and parentheses, and must not end with a period (.) character.
+	// The vnetName should be between 2 and 64 characters, consisting only of alphanumeric characters, hyphens, underscores and periods, and must not end with either a period (.) or hyphen (-) character.
+	// The subnetName should be between 1 and 80 characters, consisting only of alphanumeric characters, hyphens and underscores, and must start with an alphanumeric character and must not end with a period (.) or hyphen (-) character.
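+	// A filled-in illustration with hypothetical values:
+	// /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVnet/subnets/myNodePoolSubnet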
+	//
+	// +kubebuilder:validation:XValidation:rule="size(self.split('/')) == 11 && self.matches('^/subscriptions/.*/resourceGroups/.*/providers/Microsoft.Network/virtualNetworks/.*/subnets/.*$')",message="subnetID must be in the format `/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}`"
+	// +kubebuilder:validation:XValidation:rule="self.split('/')[2].matches('^[{]?[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}[}]?$')",message="the subscriptionId in the subnetID must be a valid UUID. It should be 5 groups of hyphen separated hexadecimal characters in the form 8-4-4-4-12"
+	// +kubebuilder:validation:XValidation:rule=`self.split('/')[4].matches('[a-zA-Z0-9-_\\(\\)\\.]{1,90}')`,message="The resourceGroupName should be between 1 and 90 characters, consisting only of alphanumeric characters, hyphens, underscores, periods and parentheses"
+	// +kubebuilder:validation:XValidation:rule="!self.split('/')[4].endsWith('.')",message="the resourceGroupName in the subnetID must not end with a period (.) character"
+	// +kubebuilder:validation:XValidation:rule=`self.split('/')[8].matches('[a-zA-Z0-9-_\\.]{2,64}')`,message="The vnetName should be between 2 and 64 characters, consisting only of alphanumeric characters, hyphens, underscores and periods"
+	// +kubebuilder:validation:XValidation:rule="!self.split('/')[8].endsWith('.') && !self.split('/')[8].endsWith('-')",message="the vnetName in the subnetID must not end with either a period (.) or hyphen (-) character"
+	// +kubebuilder:validation:XValidation:rule=`self.split('/')[10].matches('[a-zA-Z0-9][a-zA-Z0-9-_\\.]{0,79}')`,message="The subnetName should be between 1 and 80 characters, consisting only of alphanumeric characters, hyphens and underscores and must start with an alphanumeric character"
+	// +kubebuilder:validation:XValidation:rule="!self.split('/')[10].endsWith('.') && !self.split('/')[10].endsWith('-')",message="the subnetName in the subnetID must not end with a period (.) or hyphen (-) character"
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=355
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="SubnetID is immutable"
+	// +kubebuilder:validation:Required
+	SubnetID string `json:"subnetID"`
+
+	// diagnostics specifies the diagnostics settings for a virtual machine.
+	// If not specified, then Boot diagnostics will be disabled.
+	// +optional
+	Diagnostics *Diagnostics `json:"diagnostics,omitempty"`
+
+	// machineIdentityID is a user-assigned identity assigned to the VMs used to authenticate with Azure services. The
+	// identity is expected to exist under the same resource group as HostedCluster.Spec.Platform.Azure.ResourceGroupName. This
+	// user assigned identity is expected to have the Contributor role assigned to it and scoped to the resource group
+	// under HostedCluster.Spec.Platform.Azure.ResourceGroupName.
+	//
+	// If this field is not supplied, the Service Principal credentials will be written to a file on the disk of each VM
+	// in order to be accessible by the cloud provider; the aforementioned credentials provided are the same ones as
+	// HostedCluster.Spec.Platform.Azure.Credentials. However, this is less secure than using a managed identity.
+	//
+	// TODO: What is the valid character set for this field? What about minimum and maximum lengths?
+	//
+	// +optional
+	MachineIdentityID string `json:"machineIdentityID,omitempty"`
+}
+
+// AzureVMImage represents the different types of boot image sources that can be provided for an Azure VM.
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'ImageID' ? has(self.imageID) : !has(self.imageID)",message="imageID is required when type is ImageID, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'AzureMarketplace' ? has(self.azureMarketplace) : !has(self.azureMarketplace)",message="azureMarketplace is required when type is AzureMarketplace, and forbidden otherwise"
+// +union
+type AzureVMImage struct {
+	// type is the type of image data that will be provided to the Azure VM.
+	// Valid values are "ImageID" and "AzureMarketplace".
+	// ImageID is used for legacy managed VM images. This is where the user uploads a VM image directly to their resource group.
+	// AzureMarketplace means the VM will boot from an Azure Marketplace image.
+	// Marketplace images are preconfigured and published by the OS vendors and may include preconfigured software for the VM.
+	//
+	// +kubebuilder:validation:Required
+	// +unionDiscriminator
+	Type AzureVMImageType `json:"type"`
+
+	// imageID is the Azure resource ID of a VHD image to use to boot the Azure VMs from.
+	// TODO: What is the valid character set for this field? What about minimum and maximum lengths?
+	//
+	// +optional
+	// +unionMember
+	ImageID *string `json:"imageID,omitempty"`
+
+	// azureMarketplace contains the Azure Marketplace image info to use to boot the Azure VMs from.
+	//
+	// +optional
+	// +unionMember
+	AzureMarketplace *AzureMarketplaceImage `json:"azureMarketplace,omitempty"`
+}
+
+// AzureMarketplaceImage specifies the information needed to create an Azure VM from an Azure Marketplace image.
+// + This struct replicates the same fields found in CAPZ - https://github.com/kubernetes-sigs/cluster-api-provider-azure/blob/main/api/v1beta1/types.go.
+type AzureMarketplaceImage struct {
+	// publisher is the name of the organization that created the image.
+	// It must be between 3 and 50 characters in length, and consist of only lowercase letters, numbers, and hyphens (-) and underscores (_).
+	// It must start with a lowercase letter or a number.
+	// TODO: Can we explain where a user might find this value, or provide an example of one they might want to use
+	//
+	// +kubebuilder:validation:Pattern=`^[a-z0-9][a-z0-9-_]{2,49}$`
+	// +kubebuilder:validation:MinLength=3
+	// +kubebuilder:validation:MaxLength=50
+	// +kubebuilder:validation:Required
+	Publisher string `json:"publisher"`
+
+	// offer specifies the name of a group of related images created by the publisher.
+	// TODO: What is the valid character set for this field? What about minimum and maximum lengths?
+	//
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:Required
+	Offer string `json:"offer"`
+
+	// sku specifies an instance of an offer, such as a major release of a distribution.
+	// For example, 22_04-lts-gen2, 8-lvm-gen2.
+	// The value must consist only of lowercase letters, numbers, and hyphens (-) and underscores (_).
+	// TODO: What about length limits?
+	//
+	// +kubebuilder:validation:Pattern=`^[a-z0-9-_]+$`
+	// +kubebuilder:validation:MinLength=1
+	SKU string `json:"sku"`
+
+	// version specifies the version of an image sku. The allowed formats are Major.Minor.Build or 'latest'. Major,
+	// Minor, and Build are decimal numbers, e.g. '1.2.0'. Specify 'latest' to use the latest version of an image available at
+	// deployment time. Even if you use 'latest', the VM image will not automatically update after deploy time even if a
+	// new version becomes available.
+	//
+	// +kubebuilder:validation:Pattern=`^[0-9]+\.[0-9]+\.[0-9]+$|^latest$`
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=32
+	Version string `json:"version"`
+}
+
+// AzureDiagnosticsStorageAccountType specifies the type of storage account for storing Azure VM diagnostics data.
+// +kubebuilder:validation:Enum=Managed;UserManaged;Disabled
+type AzureDiagnosticsStorageAccountType string
+
+func (a *AzureDiagnosticsStorageAccountType) String() string {
+	return string(*a)
+}
+
+func (a *AzureDiagnosticsStorageAccountType) Set(s string) error {
+	switch s {
+	case string(AzureDiagnosticsStorageAccountTypeDisabled), string(AzureDiagnosticsStorageAccountTypeManaged), string(AzureDiagnosticsStorageAccountTypeUserManaged):
+		*a = AzureDiagnosticsStorageAccountType(s)
+		return nil
+	default:
+		return fmt.Errorf("unknown Azure diagnostics storage account type: %s", s)
+	}
+}
+
+func (a *AzureDiagnosticsStorageAccountType) Type() string {
+	return "AzureDiagnosticsStorageAccountType"
+}
+
+const (
+	AzureDiagnosticsStorageAccountTypeDisabled    = AzureDiagnosticsStorageAccountType("Disabled")
+	AzureDiagnosticsStorageAccountTypeManaged     = AzureDiagnosticsStorageAccountType("Managed")
+	AzureDiagnosticsStorageAccountTypeUserManaged = AzureDiagnosticsStorageAccountType("UserManaged")
+)
+
+// Diagnostics specifies the diagnostics settings for a virtual machine.
+// +kubebuilder:validation:XValidation:rule="self.storageAccountType == 'UserManaged' ? has(self.userManaged) : !has(self.userManaged)", message="userManaged is required when storageAccountType is UserManaged, and forbidden otherwise"
+// +union
+type Diagnostics struct {
+	// storageAccountType determines if the storage account for storing the diagnostics data
+	// should be disabled (Disabled), provisioned by Azure (Managed) or by the user (UserManaged).
+	// +kubebuilder:validation:Enum=Managed;UserManaged;Disabled
+	// +kubebuilder:default:=Disabled
+	// +unionDiscriminator
+	// +optional
+	StorageAccountType AzureDiagnosticsStorageAccountType `json:"storageAccountType,omitempty"`
+
+	// userManaged specifies the diagnostics settings for a virtual machine when the storage account is managed by the user.
+	// +optional
+	// +unionMember
+	UserManaged *UserManagedDiagnostics `json:"userManaged,omitempty"`
+}
+
+// UserManagedDiagnostics specifies the diagnostics settings for a virtual machine when the storage account is managed by the user.
+type UserManagedDiagnostics struct {
+	// storageAccountURI is the URI of the user-managed storage account.
+	// The URI typically will be `https://<mystorageaccountname>.blob.core.windows.net/`
+	// but may differ if you are using Azure DNS zone endpoints.
+	// You can find the correct endpoint by looking for the Blob Primary Endpoint in the
+	// endpoints tab in the Azure console or with the CLI by issuing
+	// `az storage account list --query='[].{name: name, "resource group": resourceGroup, "blob endpoint": primaryEndpoints.blob}'`.
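+	// For illustration, with a hypothetical storage account name:
+	// storageAccountURI: https://mydiagnostics.blob.core.windows.net/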
+	// +kubebuilder:validation:XValidation:rule="isURL(self) && url(self).getScheme() == 'https'", message="storageAccountURI must be a valid HTTPS URL"
+	// +kubebuilder:validation:MaxLength=1024
+	// +kubebuilder:validation:Required
+	StorageAccountURI string `json:"storageAccountURI,omitempty"`
+}
+
+// +kubebuilder:validation:Enum=Standard;StandardSSD;PremiumSSD;UltraSSD
+type AzureDiskStorageAccountType string
+
+const (
+	// StandardStorageAccountType is the standard HDD storage account type.
+	StandardStorageAccountType AzureDiskStorageAccountType = "Standard"
+
+	// StandardSSDStorageAccountType is the standard SSD storage account type.
+	StandardSSDStorageAccountType AzureDiskStorageAccountType = "StandardSSD"
+
+	// PremiumSSDStorageAccountType is the premium SSD storage account type.
+	PremiumSSDStorageAccountType AzureDiskStorageAccountType = "PremiumSSD"
+
+	// UltraSSDStorageAccountType is the ultra SSD storage account type.
+	UltraSSDStorageAccountType AzureDiskStorageAccountType = "UltraSSD"
+)
+
+// +kubebuilder:validation:Enum=Persistent;Ephemeral
+type AzureDiskPersistence string
+
+const (
+	// PersistentDiskPersistence is the persistent disk type.
+	PersistentDiskPersistence AzureDiskPersistence = "Persistent"
+
+	// EphemeralDiskPersistence is the ephemeral disk type.
+	EphemeralDiskPersistence AzureDiskPersistence = "Ephemeral"
+)
+
+// +kubebuilder:validation:XValidation:rule="!has(self.diskStorageAccountType) || self.diskStorageAccountType == 'UltraSSD' || self.sizeGiB <= 32767",message="When not using storageAccountType UltraSSD, the SizeGiB value must be less than or equal to 32,767"
+type AzureNodePoolOSDisk struct {
+	// SizeGiB is the size in GiB (1024^3 bytes) to assign to the OS disk.
+	// This should be between 16 and 65,536 when using the UltraSSD storage account type and between 16 and 32,767 when using any other storage account type.
+	// When not set, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.
+	// The current default is 30.
+	//
+	// +kubebuilder:validation:Minimum=16
+	// +kubebuilder:validation:Maximum=65536
+	// +optional
+	SizeGiB int32 `json:"sizeGiB,omitempty"`
+
+	// storageAccountType is the disk storage account type to use.
+	// Valid values are Standard, StandardSSD, PremiumSSD and UltraSSD; the field may also be omitted.
+	// Note that Standard means an HDD.
+	// The disk performance is tied to the disk type; please refer to the Azure documentation for further details
+	// https://docs.microsoft.com/en-us/azure/virtual-machines/disks-types#disk-type-comparison.
+	// When omitted this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.
+	// The current default is PremiumSSD.
+	//
+	// +optional
+	DiskStorageAccountType AzureDiskStorageAccountType `json:"diskStorageAccountType,omitempty"`
+
+	// encryptionSetID is the ID of the DiskEncryptionSet resource to use to encrypt the OS disks for the VMs.
+	// Configuring a DiskEncryptionSet allows greater control over the encryption of the VM OS disk at rest.
+	// Can be used with either platform (Azure) managed, or customer managed encryption keys.
+	// This needs to exist in the same subscription id listed in the Hosted Cluster, HostedCluster.Spec.Platform.Azure.SubscriptionID.
+	// DiskEncryptionSetID should also exist in a resource group under the same subscription id and the same location
+	// listed in the Hosted Cluster, HostedCluster.Spec.Platform.Azure.Location.
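+	// A filled-in illustration with hypothetical values:
+	// /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup/providers/Microsoft.Compute/diskEncryptionSets/myDiskEncryptionSet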
+	// The encryptionSetID should be in the format `/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{resourceName}`.
+	// The subscriptionId in the encryptionSetID must be a valid UUID. It should be 5 groups of hyphen separated hexadecimal characters in the form 8-4-4-4-12.
+	// The resourceGroupName should be between 1 and 90 characters, consisting only of alphanumeric characters, hyphens, underscores, periods and parentheses, and must not end with a period (.) character.
+	// The resourceName should be between 1 and 80 characters, consisting only of alphanumeric characters, hyphens and underscores.
+	// TODO: Are there other encryption related options we may want to expose, should this be in a struct as well?
+	//
+	// +kubebuilder:validation:XValidation:rule="size(self.split('/')) == 9 && self.matches('^/subscriptions/.*/resourceGroups/.*/providers/Microsoft.Compute/diskEncryptionSets/.*$')",message="encryptionSetID must be in the format `/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{resourceName}`"
+	// +kubebuilder:validation:XValidation:rule="self.split('/')[2].matches('^[{]?[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}[}]?$')",message="the subscriptionId in the encryptionSetID must be a valid UUID. It should be 5 groups of hyphen separated hexadecimal characters in the form 8-4-4-4-12"
+	// +kubebuilder:validation:XValidation:rule=`self.split('/')[4].matches('[a-zA-Z0-9-_\\(\\)\\.]{1,90}')`,message="The resourceGroupName should be between 1 and 90 characters, consisting only of alphanumeric characters, hyphens, underscores, periods and parentheses"
+	// +kubebuilder:validation:XValidation:rule="!self.split('/')[4].endsWith('.')",message="the resourceGroupName in the encryptionSetID must not end with a period (.) character"
+	// +kubebuilder:validation:XValidation:rule="self.split('/')[8].matches('[a-zA-Z0-9-_]{1,80}')",message="The resourceName should be between 1 and 80 characters, consisting only of alphanumeric characters, hyphens and underscores"
+	// +kubebuilder:validation:MinLength:=1
+	// +kubebuilder:validation:MaxLength:=285
+	// +optional
+	EncryptionSetID string `json:"encryptionSetID,omitempty"`
+
+	// persistence determines whether the OS disk should be persisted beyond the life of the VM.
+	// Valid values are Persistent and Ephemeral.
+	// When set to Ephemeral, the OS disk will not be persisted to Azure storage and implies restrictions to the VM size and caching type.
+	// Full details can be found in the Azure documentation https://learn.microsoft.com/en-us/azure/virtual-machines/ephemeral-os-disks.
+	// Ephemeral disks are primarily used for stateless applications, provide lower latency than Persistent disks and also incur no storage costs.
+	// When not set, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.
+	//
+	// +optional
+	Persistence AzureDiskPersistence `json:"persistence,omitempty"`
+}
+
+// AzurePlatformSpec specifies configuration for clusters running on Azure. Generally, the HyperShift API assumes bring
+// your own (BYO) cloud infrastructure resources. For example, resources like a resource group, a subnet, or a vnet
+// would be pre-created and then their names would be used respectively in the ResourceGroupName, SubnetName, VnetName
+// fields of the Hosted Cluster CR. An existing cloud resource is expected to exist under the same SubscriptionID.
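+//
+// The following sketch shows the BYO fields together; all values are
+// hypothetical and are not upstream documentation:
+//
+//	platform:
+//	  azure:
+//	    cloud: AzurePublicCloud
+//	    location: eastus
+//	    resourceGroup: myResourceGroup
+//	    subscriptionID: 00000000-0000-0000-0000-000000000000
+//	    vnetID: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVnet
+//	    subnetID: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVnet/subnets/mySubnet
+//	    securityGroupID: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/myNSG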
+type AzurePlatformSpec struct {
+	// Credentials is the object containing existing Azure credentials needed for creating and managing cloud
+	// infrastructure resources.
+	//
+	// +kubebuilder:validation:Required
+	// +required
+	Credentials corev1.LocalObjectReference `json:"credentials"`
+
+	// Cloud is the cloud environment identifier; valid values can be found here: https://github.com/Azure/go-autorest/blob/4c0e21ca2bbb3251fe7853e6f9df6397f53dd419/autorest/azure/environments.go#L33
+	//
+	// +kubebuilder:validation:Enum=AzurePublicCloud;AzureUSGovernmentCloud;AzureChinaCloud;AzureGermanCloud;AzureStackCloud
+	// +kubebuilder:default="AzurePublicCloud"
+	Cloud string `json:"cloud,omitempty"`
+
+	// Location is the Azure region where all the cloud infrastructure resources will be created.
+	//
+	// Example: eastus
+	//
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Location is immutable"
+	// +immutable
+	// +required
+	Location string `json:"location"`
+
+	// ResourceGroupName is the name of an existing resource group where all cloud resources created by the Hosted
+	// Cluster are to be placed. The resource group is expected to exist under the same subscription as SubscriptionID.
+	//
+	// In ARO HCP, this will be the managed resource group where customer cloud resources will be created.
+	//
+	// Resource group naming requirements can be found here: https://azure.github.io/PSRule.Rules.Azure/en/rules/Azure.ResourceGroup.Name/.
+	//
+	// Example: if your resource group ID is /subscriptions/<subscriptionID>/resourceGroups/<resourceGroupName>, your
+	// ResourceGroupName is <resourceGroupName>.
+	//
+	// +kubebuilder:default:=default
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Pattern=`^[a-zA-Z0-9_()\-\.]{1,89}[a-zA-Z0-9_()\-]$`
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="ResourceGroupName is immutable"
+	// +immutable
+	// +required
+	ResourceGroupName string `json:"resourceGroup"`
+
+	// VnetID is the ID of an existing VNET to use in creating VMs. The VNET can exist in a different resource group
+	// other than the one specified in ResourceGroupName, but it must exist under the same subscription as
+	// SubscriptionID.
+	//
+	// In ARO HCP, this will be the ID of the customer provided VNET.
+	//
+	// Example: /subscriptions/<subscriptionID>/resourceGroups/<resourceGroupName>/providers/Microsoft.Network/virtualNetworks/<vnetName>
+	//
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="VnetID is immutable"
+	// +immutable
+	// +required
+	VnetID string `json:"vnetID,omitempty"`
+
+	// subnetID is the subnet ID of an existing subnet where the nodes in the nodepool will be created. This can be a
+	// different subnet than the one listed in the HostedCluster, HostedCluster.Spec.Platform.Azure.SubnetID, but must
+	// exist in the same network, HostedCluster.Spec.Platform.Azure.VnetID, and must exist under the same subscription ID,
+	// HostedCluster.Spec.Platform.Azure.SubscriptionID.
+	// subnetID is immutable once set.
+	// The subnetID should be in the format `/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}`.
+	// The subscriptionId in the subnetID must be a valid UUID. It should be 5 groups of hyphen separated hexadecimal characters in the form 8-4-4-4-12.
+	// The resourceGroupName should be between 1 and 90 characters, consisting only of alphanumeric characters, hyphens, underscores, periods and parentheses and must not end with a period (.) character.
+	// The vnetName should be between 2 and 64 characters, consisting only of alphanumeric characters, hyphens, underscores and periods and must not end with either a period (.) or hyphen (-) character.
+	// The subnetName should be between 1 and 80 characters, consisting only of alphanumeric characters, hyphens and underscores and must start with an alphanumeric character and must not end with a period (.) or hyphen (-) character.
+	//
+	// +kubebuilder:validation:XValidation:rule="size(self.split('/')) == 11 && self.matches('^/subscriptions/.*/resourceGroups/.*/providers/Microsoft.Network/virtualNetworks/.*/subnets/.*$')",message="subnetID must be in the format `/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}`"
+	// +kubebuilder:validation:XValidation:rule="self.split('/')[2].matches('^[{]?[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}[}]?$')",message="the subscriptionId in the subnetID must be a valid UUID. It should be 5 groups of hyphen separated hexadecimal characters in the form 8-4-4-4-12"
+	// +kubebuilder:validation:XValidation:rule=`self.split('/')[4].matches('[a-zA-Z0-9-_\\(\\)\\.]{1,90}')`,message="The resourceGroupName should be between 1 and 90 characters, consisting only of alphanumeric characters, hyphens, underscores, periods and parentheses"
+	// +kubebuilder:validation:XValidation:rule="!self.split('/')[4].endsWith('.')",message="the resourceGroupName in the subnetID must not end with a period (.) character"
+	// +kubebuilder:validation:XValidation:rule=`self.split('/')[8].matches('[a-zA-Z0-9-_\\.]{2,64}')`,message="The vnetName should be between 2 and 64 characters, consisting only of alphanumeric characters, hyphens, underscores and periods"
+	// +kubebuilder:validation:XValidation:rule="!self.split('/')[8].endsWith('.') && !self.split('/')[8].endsWith('-')",message="the vnetName in the subnetID must not end with either a period (.) or hyphen (-) character"
+	// +kubebuilder:validation:XValidation:rule=`self.split('/')[10].matches('[a-zA-Z0-9][a-zA-Z0-9-_\\.]{0,79}')`,message="The subnetName should be between 1 and 80 characters, consisting only of alphanumeric characters, hyphens and underscores and must start with an alphanumeric character"
+	// +kubebuilder:validation:XValidation:rule="!self.split('/')[10].endsWith('.') && !self.split('/')[10].endsWith('-')",message="the subnetName in the subnetID must not end with a period (.) or hyphen (-) character"
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=355
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="SubnetID is immutable"
+	// +kubebuilder:validation:Required
+	SubnetID string `json:"subnetID"`
+
+	// SubscriptionID is a unique identifier for an Azure subscription used to manage resources.
+	//
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="SubscriptionID is immutable"
+	// +immutable
+	// +required
+	SubscriptionID string `json:"subscriptionID"`
+
+	// SecurityGroupID is the ID of an existing security group on the SubnetID. This field is provided as part of the
+	// configuration for the Azure cloud provider, aka Azure cloud controller manager (CCM). This security group is
+	// expected to exist under the same subscription as SubscriptionID.
+	//
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="SecurityGroupID is immutable"
+	// +kubebuilder:validation:Required
+	// +immutable
+	// +required
+	SecurityGroupID string `json:"securityGroupID"`
+
+	// managedIdentities contains the managed identities needed for HCP control plane and data plane components that
+	// authenticate with Azure's API.
+	//
+	// +kubebuilder:validation:Required
+	// +openshift:enable:FeatureGate=AROHCPManagedIdentities
+	ManagedIdentities AzureResourceManagedIdentities `json:"managedIdentities,omitempty"`
+}
+
+// ManagedAzureKeyVault is an Azure Key Vault on the management cluster.
+type ManagedAzureKeyVault struct {
+	// name is the name of the Azure Key Vault on the management cluster.
+	//
+	// +kubebuilder:validation:Required
+	Name string `json:"name"`
+
+	// tenantID is the tenant ID of the Azure Key Vault on the management cluster.
+	//
+	// +kubebuilder:validation:Required
+	TenantID string `json:"tenantID"`
+}
+
+// AzureResourceManagedIdentities contains the managed identities needed for HCP control plane and data plane components
+// that authenticate with Azure's API.
+type AzureResourceManagedIdentities struct {
+	// controlPlane contains the client IDs of all the managed identities on the HCP control plane needing to
+	// authenticate with Azure's API.
+	//
+	// +kubebuilder:validation:Required
+	ControlPlane ControlPlaneManagedIdentities `json:"controlPlane"`
+
+	// Future placeholder - DataPlaneMIs * DataPlaneManagedIdentities
+}
+
+// ManagedIdentity contains the client ID, and its certificate name, of a managed identity. This managed identity is
+// used, by an HCP component, to authenticate with the Azure API.
+type ManagedIdentity struct {
+	// clientID is the client ID of a managed identity.
+	//
+	// +kubebuilder:validation:XValidation:rule="self.matches('^[{]?[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}[}]?$')",message="the client ID of a managed identity must be a valid UUID. It should be 5 groups of hyphen separated hexadecimal characters in the form 8-4-4-4-12."
+	// +kubebuilder:validation:Required
+	ClientID string `json:"clientID"`
+
+	// certificateName is the name of the certificate backing the managed identity. This certificate is expected to
+	// reside in an Azure Key Vault on the management cluster.
+	//
+	// +kubebuilder:validation:Required
+	CertificateName string `json:"certificateName"`
+}
+
+// ControlPlaneManagedIdentities contains the managed identities on the HCP control plane needing to authenticate with
+// Azure's API.
+type ControlPlaneManagedIdentities struct {
+	// managedIdentitiesKeyVault contains information on the management cluster's managed identities Azure Key Vault.
+	// This Key Vault is where the managed identities certificates are stored. These certificates are pulled out of the
+	// Key Vault by the Secrets Store CSI driver and mounted into a volume on control plane pods requiring
+	// authentication with Azure API.
+	//
+	// More information on how the Secrets Store CSI driver works to do this can be found here:
+	// https://learn.microsoft.com/en-us/azure/aks/csi-secrets-store-driver.
+	//
+	// +kubebuilder:validation:Required
+	ManagedIdentitiesKeyVault ManagedAzureKeyVault `json:"managedIdentitiesKeyVault"`
+
+	// cloudProvider is a pre-existing managed identity associated with the azure cloud provider, aka cloud controller
+	// manager.
+	//
+	// +kubebuilder:validation:Required
+	CloudProvider ManagedIdentity `json:"cloudProvider"`
+
+	// nodePoolManagement is a pre-existing managed identity associated with the operator managing the NodePools.
+	//
+	// +kubebuilder:validation:Required
+	NodePoolManagement ManagedIdentity `json:"nodePoolManagement"`
+
+	// controlPlaneOperator is a pre-existing managed identity associated with the control plane operator.
+	//
+	// +kubebuilder:validation:Required
+	ControlPlaneOperator ManagedIdentity `json:"controlPlaneOperator"`
+
+	// imageRegistry is a pre-existing managed identity associated with the cluster-image-registry-operator.
+	//
+	// +kubebuilder:validation:Required
+	ImageRegistry ManagedIdentity `json:"imageRegistry"`
+
+	// ingress is a pre-existing managed identity associated with the cluster-ingress-operator.
+	//
+	// +kubebuilder:validation:Required
+	Ingress ManagedIdentity `json:"ingress"`
+
+	// network is a pre-existing managed identity associated with the cluster-network-operator.
+	//
+	// +kubebuilder:validation:Required
+	Network ManagedIdentity `json:"network"`
+
+	// disk is a pre-existing managed identity associated with the azure-disk-controller.
+	//
+	// +kubebuilder:validation:Required
+	Disk ManagedIdentity `json:"disk"`
+
+	// file is a pre-existing managed identity associated with the azure-file-controller.
+	//
+	// +kubebuilder:validation:Required
+	File ManagedIdentity `json:"file"`
+}
+
+// AzureKMSSpec defines metadata about the configuration of the Azure KMS Secret Encryption provider using Azure key vault
+type AzureKMSSpec struct {
+	// ActiveKey defines the active key used to encrypt new secrets
+	//
+	// +kubebuilder:validation:Required
+	ActiveKey AzureKMSKey `json:"activeKey"`
+	// BackupKey defines the old key during the rotation process so previously created
+	// secrets can continue to be decrypted until they are all re-encrypted with the active key.
+	// +optional
+	BackupKey *AzureKMSKey `json:"backupKey,omitempty"`
+
+	// kms is a pre-existing managed identity used to authenticate with Azure KMS.
+	//
+	// +kubebuilder:validation:Required
+	// +openshift:enable:FeatureGate=AROHCPManagedIdentities
+	KMS ManagedIdentity `json:"kms"`
+}
+
+type AzureKMSKey struct {
+	// KeyVaultName is the name of the keyvault.
+	// Must match criteria specified at https://docs.microsoft.com/en-us/azure/key-vault/general/about-keys-secrets-certificates#vault-name-and-object-name
+	// Your Microsoft Entra application used to create the cluster must be authorized to access this keyvault, e.g. using the Azure CLI:
+	// `az keyvault set-policy -n $KEYVAULT_NAME --key-permissions decrypt encrypt --spn <application-client-id>`
+	KeyVaultName string `json:"keyVaultName"`
+	// KeyName is the name of the keyvault key used for encrypt/decrypt
+	KeyName string `json:"keyName"`
+	// KeyVersion contains the version of the key to use
+	KeyVersion string `json:"keyVersion"`
+}
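To make the shape of the Azure KMS types above concrete, here is a hypothetical populated spec mid-rotation; every value below is made up for illustration and the hyperv1 alias is ours:

package main

import (
	"fmt"

	hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
)

func main() {
	// New secrets are encrypted with ActiveKey while BackupKey still
	// decrypts older ones, per the BackupKey comment above.
	kms := hyperv1.AzureKMSSpec{
		ActiveKey: hyperv1.AzureKMSKey{
			KeyVaultName: "example-vault",    // hypothetical vault name
			KeyName:      "etcd-encryption",  // hypothetical key name
			KeyVersion:   "2c3f4a5b6d7e8f90", // hypothetical key version
		},
		BackupKey: &hyperv1.AzureKMSKey{
			KeyVaultName: "example-vault",
			KeyName:      "etcd-encryption",
			KeyVersion:   "1a2b3c4d5e6f7a80",
		},
		// The kms identity must carry a UUID client ID per the ManagedIdentity validation.
		KMS: hyperv1.ManagedIdentity{
			ClientID:        "12345678-1234-1234-1234-123456789abc",
			CertificateName: "kms-client-cert",
		},
	}
	fmt.Println(kms.ActiveKey.KeyName)
}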
diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/certificatesigningrequestapproval_types.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/certificatesigningrequestapproval_types.go
new file mode 100644
index 000000000..37afedc2d
--- /dev/null
+++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/certificatesigningrequestapproval_types.go
@@ -0,0 +1,33 @@
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +kubebuilder:resource:path=certificatesigningrequestapprovals,shortName=csra;csras,scope=Namespaced
+// +kubebuilder:object:root=true
+// +kubebuilder:storageversion
+
+// CertificateSigningRequestApproval defines the desired state of CertificateSigningRequestApproval
+type CertificateSigningRequestApproval struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   CertificateSigningRequestApprovalSpec   `json:"spec,omitempty"`
+	Status CertificateSigningRequestApprovalStatus `json:"status,omitempty"`
+}
+
+// CertificateSigningRequestApprovalSpec defines the desired state of CertificateSigningRequestApproval
+type CertificateSigningRequestApprovalSpec struct{}
+
+// CertificateSigningRequestApprovalStatus defines the observed state of CertificateSigningRequestApproval
+type CertificateSigningRequestApprovalStatus struct{}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// CertificateSigningRequestApprovalList contains a list of CertificateSigningRequestApprovals.
+type CertificateSigningRequestApprovalList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []CertificateSigningRequestApproval `json:"items"`
+}
diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/clusterconfig.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/clusterconfig.go
new file mode 100644
index 000000000..f2fd3e755
--- /dev/null
+++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/clusterconfig.go
@@ -0,0 +1,43 @@
+package v1beta1
+
+import configv1 "github.com/openshift/api/config/v1"
+
+func (c *ClusterConfiguration) GetAPIServer() *configv1.APIServerSpec { return c.APIServer }
+func (c *ClusterConfiguration) GetAuthentication() *configv1.AuthenticationSpec {
+	return c.Authentication
+}
+func (c *ClusterConfiguration) GetFeatureGate() *configv1.FeatureGateSpec { return c.FeatureGate }
+func (c *ClusterConfiguration) GetImage() *configv1.ImageSpec             { return c.Image }
+func (c *ClusterConfiguration) GetIngress() *configv1.IngressSpec         { return c.Ingress }
+func (c *ClusterConfiguration) GetNetwork() *configv1.NetworkSpec         { return c.Network }
+func (c *ClusterConfiguration) GetOAuth() *configv1.OAuthSpec             { return c.OAuth }
+func (c *ClusterConfiguration) GetScheduler() *configv1.SchedulerSpec     { return c.Scheduler }
+func (c *ClusterConfiguration) GetProxy() *configv1.ProxySpec             { return c.Proxy }
+
+func (c *ClusterConfiguration) GetTLSSecurityProfile() *configv1.TLSSecurityProfile {
+	if c != nil && c.APIServer != nil {
+		return c.APIServer.TLSSecurityProfile
+	}
+	return nil
+}
+
+func (c *ClusterConfiguration) GetAutoAssignCIDRs() []string {
+	if c != nil && c.Network != nil && c.Network.ExternalIP != nil {
+		return c.Network.ExternalIP.AutoAssignCIDRs
+	}
+	return nil
+}
+
+func (c *ClusterConfiguration) GetAuditPolicyConfig() configv1.Audit {
+	if c != nil && c.APIServer != nil && c.APIServer.Audit.Profile != "" {
+		return c.APIServer.Audit
+	}
+	return configv1.Audit{Profile: configv1.DefaultAuditProfileType}
+}
+
+func (c *ClusterConfiguration) GetFeatureGateSelection() configv1.FeatureGateSelection {
+	if c != nil && c.FeatureGate != nil {
+		return c.FeatureGate.FeatureGateSelection
+	}
+	return configv1.FeatureGateSelection{FeatureSet: configv1.Default}
+}
diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/controlplanecomponent_types.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/controlplanecomponent_types.go
new file mode 100644
index 000000000..6ca9e97ac
--- /dev/null
+++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/controlplanecomponent_types.go
@@ -0,0 +1,96 @@
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+func init() {
+	SchemeBuilder.Register(func(scheme *runtime.Scheme) error {
+		scheme.AddKnownTypes(SchemeGroupVersion,
+			&ControlPlaneComponent{},
+			&ControlPlaneComponentList{},
+		)
+		return nil
+	})
+}
+
+const (
+	// ControlPlaneComponentAvailable indicates whether the ControlPlaneComponent is available.
+	ControlPlaneComponentAvailable ConditionType = "Available"
+	// ControlPlaneComponentProgressing indicates whether the ControlPlaneComponent is progressing.
+	ControlPlaneComponentProgressing ConditionType = "Progressing"
+
+	// WaitingForDependenciesReason indicates that there are unavailable dependencies blocking the ControlPlaneComponent reconciliation.
+	WaitingForDependenciesReason string = "WaitingForDependencies"
+	// ReconciliationErrorReason indicates that there was an error during the reconciliation of the ControlPlaneComponent.
+	ReconciliationErrorReason string = "ReconciliationError"
+)
+
+// ControlPlaneComponentSpec defines the desired state of ControlPlaneComponent
+type ControlPlaneComponentSpec struct {
+}
+
+// ComponentResource defines a resource reconciled by a ControlPlaneComponent.
+type ComponentResource struct {
+	// kind is the name of the resource schema.
+	// +required
+	Kind string `json:"kind"`
+
+	// group is the API group for this resource type.
+	// +required
+	Group string `json:"group"`
+
+	// name is the name of this resource.
+	// +required
+	Name string `json:"name"`
+}
+
+// ControlPlaneComponentStatus defines the observed state of ControlPlaneComponent
+type ControlPlaneComponentStatus struct {
+	// version reports the current version of this component.
+	// +optional
+	Version string `json:"version,omitempty"`
+
+	// resources is a list of the resources reconciled by this component.
+	// +optional
+	Resources []ComponentResource `json:"resources,omitempty"`
+
+	// Conditions contains details for the current state of the ControlPlane Component.
+	// If there is an error, then the Available condition will be false.
+	//
+	// Current condition types are: "Available"
+	// +optional
+	// +listType=map
+	// +listMapKey=type
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=controlplanecomponents,shortName=cpc;cpcs,scope=Namespaced
+// +kubebuilder:storageversion
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".status.version",description="Version"
+// +kubebuilder:printcolumn:name="Available",type="string",JSONPath=".status.conditions[?(@.type==\"Available\")].status",description="Available"
+// +kubebuilder:printcolumn:name="Progressing",type="string",JSONPath=".status.conditions[?(@.type==\"Progressing\")].status",description="Progressing"
+// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.conditions[?(@.type==\"Available\")].message",description="Message"
+// +kubebuilder:printcolumn:name="ProgressingMessage",type="string",priority=1,JSONPath=".status.conditions[?(@.type==\"Progressing\")].message",description="ProgressingMessage"
+// ControlPlaneComponent specifies the state of a ControlPlane Component
+// +openshift:enable:FeatureGate=ControlPlaneV2
+type ControlPlaneComponent struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   ControlPlaneComponentSpec   `json:"spec,omitempty"`
+	Status ControlPlaneComponentStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// ControlPlaneComponentList contains a list of ControlPlaneComponent
+type ControlPlaneComponentList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []ControlPlaneComponent `json:"items"`
+}
diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/doc.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/doc.go
new file mode 100644
index 000000000..01fb78488
--- /dev/null
+++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/doc.go
@@ -0,0 +1,14 @@
+/*
+Package v1beta1 contains the HyperShift API.
+
+The HyperShift API enables creating and managing lightweight, flexible, heterogeneous
+OpenShift clusters at scale.
+
+HyperShift clusters are deployed in a topology which isolates the "control plane"
+(e.g. etcd, the API server, controller manager, etc.) from the "data plane" (e.g.
+worker nodes and their kubelets, and the infrastructure on which they run). This
+enables "hosted control plane as a service" use cases.
+*/
+// +kubebuilder:object:generate=true
+// +groupName=hypershift.openshift.io
+package v1beta1
diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/endpointservice_types.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/endpointservice_types.go
new file mode 100644
index 000000000..c0850c171
--- /dev/null
+++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/endpointservice_types.go
@@ -0,0 +1,101 @@
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+func init() {
+	SchemeBuilder.Register(func(scheme *runtime.Scheme) error {
+		scheme.AddKnownTypes(SchemeGroupVersion,
+			&AWSEndpointService{},
+			&AWSEndpointServiceList{},
+		)
+		return nil
+	})
+}
+
+// The following are conditions and reasons for the AWSEndpointService.
+const (
+	// AWSEndpointServiceAvailable indicates whether the AWS Endpoint Service
+	// has been created for the specified NLB in the management VPC
+	AWSEndpointServiceAvailable ConditionType = "AWSEndpointServiceAvailable"
+
+	// AWSEndpointAvailable indicates whether the AWS Endpoint has been
+	// created in the guest VPC
+	AWSEndpointAvailable ConditionType = "AWSEndpointAvailable"
+
+	AWSSuccessReason string = "AWSSuccess"
+	AWSErrorReason   string = "AWSError"
+)
+
+// AWSEndpointServiceSpec defines the desired state of AWSEndpointService
+type AWSEndpointServiceSpec struct {
+	// The name of the NLB for which an Endpoint Service should be configured
+	NetworkLoadBalancerName string `json:"networkLoadBalancerName"`
+
+	// SubnetIDs is the list of subnet IDs to which guest nodes can attach
+	// +optional
+	SubnetIDs []string `json:"subnetIDs,omitempty"`
+
+	// Tags to apply to the EndpointService
+	// +optional
+	ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"`
+}
+
+// AWSEndpointServiceStatus defines the observed state of AWSEndpointService
type AWSEndpointServiceStatus struct {
+	// EndpointServiceName is the name of the Endpoint Service created in the
+	// management VPC
+	// +optional
+	EndpointServiceName string `json:"endpointServiceName,omitempty"`
+
+	// EndpointID is the ID of the Endpoint created in the guest VPC
+	// +optional
+	EndpointID string `json:"endpointID,omitempty"`
+
+	// DNSNames are the names for the records created in the hypershift private zone
+	// +optional
+	DNSNames []string `json:"dnsNames,omitempty"`
+
+	// DNSZoneID is the ID for the hypershift private zone
+	// +optional
+	DNSZoneID string `json:"dnsZoneID,omitempty"`
+
+	// SecurityGroupID is the ID for the VPC endpoint SecurityGroup
+	SecurityGroupID string `json:"securityGroupID,omitempty"`
+
+	// Conditions contains details for the current state of the Endpoint Service
+	// request. If there is an error processing the request, e.g. the NLB doesn't
+	// exist, then the Available condition will be false, reason AWSErrorReason,
+	// and the error reported in the message.
+	//
+	// Current condition types are: "Available"
+	// +optional
+	// +listType=map
+	// +listMapKey=type
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=awsendpointservices,scope=Namespaced
+// +kubebuilder:storageversion
+// +kubebuilder:subresource:status
+// AWSEndpointService specifies a request for an Endpoint Service in AWS
+type AWSEndpointService struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   AWSEndpointServiceSpec   `json:"spec,omitempty"`
+	Status AWSEndpointServiceStatus `json:"status,omitempty"`
+}
+
+// AWSEndpointServiceList contains a list of AWSEndpointService
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type AWSEndpointServiceList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []AWSEndpointService `json:"items"`
+}
diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/groupversion_info.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/groupversion_info.go
new file mode 100644
index 000000000..3241a27e4
--- /dev/null
+++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/groupversion_info.go
@@ -0,0 +1,40 @@
+// Package v1beta1 contains API Schema definitions for the hypershift.openshift.io v1beta1 API group
+// +kubebuilder:object:generate=true
+// +groupName=hypershift.openshift.io
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+	// GroupVersion is the group version used to register these objects
+	GroupVersion = schema.GroupVersion{Group: "hypershift.openshift.io", Version: "v1beta1"}
+
+	SchemeGroupVersion = GroupVersion
+
+	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+
+	// AddToScheme adds the types in this group-version to the given scheme.
+	AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind.
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
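For consumers of this vendored package, registration follows the usual scheme-builder pattern. A minimal sketch under the assumption that the caller only needs a populated runtime.Scheme (the hyperv1 alias is ours):

package main

import (
	"fmt"

	hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme registers every type wired up via SchemeBuilder.Register,
	// e.g. HostedCluster, HostedControlPlane, AWSEndpointService.
	if err := hyperv1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// Kind qualifies a bare kind with the hypershift.openshift.io group.
	fmt.Println(hyperv1.Kind("HostedCluster")) // HostedCluster.hypershift.openshift.io
}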
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource.
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hosted_controlplane.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hosted_controlplane.go
new file mode 100644
index 000000000..82b9441f5
--- /dev/null
+++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hosted_controlplane.go
@@ -0,0 +1,325 @@
+package v1beta1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+
+	configv1 "github.com/openshift/api/config/v1"
+)
+
+func init() {
+	SchemeBuilder.Register(func(scheme *runtime.Scheme) error {
+		scheme.AddKnownTypes(SchemeGroupVersion,
+			&HostedControlPlane{},
+			&HostedControlPlaneList{},
+		)
+		metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+		return nil
+	})
+}
+
+// HostedControlPlane defines the desired state of HostedControlPlane
+// +genclient
+// +kubebuilder:resource:path=hostedcontrolplanes,shortName=hcp;hcps,scope=Namespaced,categories=cluster-api
+// +kubebuilder:storageversion
+// +kubebuilder:subresource:status
+// +kubebuilder:object:root=true
+type HostedControlPlane struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   HostedControlPlaneSpec   `json:"spec,omitempty"`
+	Status HostedControlPlaneStatus `json:"status,omitempty"`
+}
+
+// HostedControlPlaneSpec defines the desired state of HostedControlPlane
+type HostedControlPlaneSpec struct {
+	// ReleaseImage is the release image applied to the hosted control plane.
+	ReleaseImage string `json:"releaseImage"`
+
+	// ControlPlaneReleaseImage specifies the desired OCP release payload for
+	// control plane components running on the management cluster.
+	// If not defined, ReleaseImage is used.
+	ControlPlaneReleaseImage *string `json:"controlPlaneReleaseImage,omitempty"`
+
+	// updateService may be used to specify the preferred upstream update service.
+	// By default it will use the appropriate update service for the cluster and region.
+	//
+	// +optional
+	UpdateService configv1.URL `json:"updateService,omitempty"`
+
+	// channel is an identifier for explicitly requesting that a non-default
+	// set of updates be applied to this cluster. The default channel will
+	// contain stable updates that are appropriate for production clusters.
+	//
+	// +optional
+	Channel string `json:"channel,omitempty"`
+
+	PullSecret corev1.LocalObjectReference `json:"pullSecret"`
+
+	// IssuerURL is an OIDC issuer URL which is used as the issuer in all
+	// ServiceAccount tokens generated by the control plane API server. The
+	// default value is kubernetes.default.svc, which only works for in-cluster
+	// validation.
+	IssuerURL string `json:"issuerURL"`
+
+	// Networking specifies network configuration for the cluster.
+	// Temporarily optional for backward compatibility, required in future releases.
+	// +optional
+	Networking ClusterNetworking `json:"networking,omitempty"`
+
+	SSHKey corev1.LocalObjectReference `json:"sshKey"`
+
+	// ClusterID is the unique id that identifies the cluster externally.
+	// Making it optional here allows us to keep compatibility with previous
+	// versions of the control-plane-operator that have no knowledge of this
+	// field.
+	// +optional
+	ClusterID string `json:"clusterID,omitempty"`
+
+	InfraID  string       `json:"infraID"`
+	Platform PlatformSpec `json:"platform"`
+	DNS      DNSSpec      `json:"dns"`
+
+	// ServiceAccountSigningKey is a reference to a secret containing the private key
+	// used by the service account token issuer. The secret is expected to contain
+	// a single key named "key". If not specified, a service account signing key will
+	// be generated automatically for the cluster.
+	//
+	// +optional
+	ServiceAccountSigningKey *corev1.LocalObjectReference `json:"serviceAccountSigningKey,omitempty"`
+
+	// ControllerAvailabilityPolicy specifies the availability policy applied to
+	// critical control plane components. The default value is SingleReplica.
+	//
+	// +optional
+	// +immutable
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="ControllerAvailabilityPolicy is immutable"
+	// +kubebuilder:default:="SingleReplica"
+	ControllerAvailabilityPolicy AvailabilityPolicy `json:"controllerAvailabilityPolicy,omitempty"`
+
+	// InfrastructureAvailabilityPolicy specifies the availability policy applied
+	// to infrastructure services which run on cluster nodes. The default value is
+	// SingleReplica.
+	//
+	// +optional
+	// +kubebuilder:default:="SingleReplica"
+	InfrastructureAvailabilityPolicy AvailabilityPolicy `json:"infrastructureAvailabilityPolicy,omitempty"`
+
+	// FIPS specifies if the nodes for the cluster will be running in FIPS mode
+	// +optional
+	FIPS bool `json:"fips"`
+
+	// KubeConfig specifies the name and key for the kubeconfig secret
+	// +optional
+	KubeConfig *KubeconfigSecretRef `json:"kubeconfig,omitempty"`
+
+	// Services defines metadata about how control plane services are published
+	// in the management cluster.
+	// +kubebuilder:validation:MaxItems=6
+	// +kubebuilder:validation:MinItems=4
+	Services []ServicePublishingStrategyMapping `json:"services"`
+
+	// AuditWebhook contains metadata for configuring an audit webhook
+	// endpoint for a cluster to process cluster audit events. It references
+	// a secret that contains the webhook information for the audit webhook endpoint.
+	// It is a secret because if the endpoint has MTLS the kubeconfig will contain client
+	// keys. This is currently only supported in IBM Cloud. The kubeconfig needs to be stored
+	// in the secret with a secret key name that corresponds to the constant AuditWebhookKubeconfigKey.
+	// +optional
+	AuditWebhook *corev1.LocalObjectReference `json:"auditWebhook,omitempty"`
+
+	// Etcd contains metadata about the etcd cluster the hypershift managed OpenShift control plane components
+	// use to store data.
+	Etcd EtcdSpec `json:"etcd"`
+
+	// Configuration embeds resources that correspond to the openshift configuration API:
+	// https://docs.openshift.com/container-platform/4.7/rest_api/config_apis/config-apis-index.html
+	// +kubebuilder:validation:Optional
+	Configuration *ClusterConfiguration `json:"configuration,omitempty"`
+
+	// ImageContentSources lists sources/repositories for the release-image content.
+	// +optional
+	ImageContentSources []ImageContentSource `json:"imageContentSources,omitempty"`
+
+	// AdditionalTrustBundle references a ConfigMap containing a PEM-encoded X.509 certificate bundle
+	// +optional
+	AdditionalTrustBundle *corev1.LocalObjectReference `json:"additionalTrustBundle,omitempty"`
+
+	// SecretEncryption contains metadata about the kubernetes secret encryption strategy being used for the
+	// cluster when applicable.
+	// +optional
+	SecretEncryption *SecretEncryptionSpec `json:"secretEncryption,omitempty"`
+
+	// PausedUntil is a field that can be used to pause reconciliation on a resource.
+	// Either a date can be provided in RFC3339 format or a boolean. If a date is
+	// provided: reconciliation is paused on the resource until that date. If the boolean true is
+	// provided: reconciliation is paused on the resource until the field is removed.
+	// +optional
+	PausedUntil *string `json:"pausedUntil,omitempty"`
+
+	// OLMCatalogPlacement specifies the placement of OLM catalog components. By default,
+	// this is set to management and OLM catalog components are deployed onto the management
+	// cluster. If set to guest, the OLM catalog components will be deployed onto the guest
+	// cluster.
+	//
+	// +kubebuilder:default=management
+	// +optional
+	// +immutable
+	OLMCatalogPlacement OLMCatalogPlacement `json:"olmCatalogPlacement,omitempty"`
+
+	// Autoscaling specifies auto-scaling behavior that applies to all NodePools
+	// associated with the control plane.
+	//
+	// +optional
+	Autoscaling ClusterAutoscaling `json:"autoscaling,omitempty"`
+
+	// NodeSelector, when specified, must be true for the pods managed by the HostedCluster to be scheduled.
+	//
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+	// Tolerations, when specified, define the custom tolerations added to the hcp pods.
+	//
+	// +optional
+	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+}
+
+// AvailabilityPolicy specifies a high level availability policy for components.
+// +kubebuilder:validation:Enum=HighlyAvailable;SingleReplica
+type AvailabilityPolicy string
+
+const (
+	// HighlyAvailable means components should be resilient to problems across
+	// fault boundaries as defined by the component to which the policy is
+	// attached. This usually means running critical workloads with 3 replicas and
+	// with little or no toleration of disruption of the component.
+	HighlyAvailable AvailabilityPolicy = "HighlyAvailable"
+
+	// SingleReplica means components are not expected to be resilient to problems
+	// across most fault boundaries associated with high availability. This
+	// usually means running critical workloads with just 1 replica and with
+	// toleration of full disruption of the component.
+	SingleReplica AvailabilityPolicy = "SingleReplica"
+)
+
+type KubeconfigSecretRef struct {
+	Name string `json:"name"`
+	Key  string `json:"key"`
+}
+
+type ConditionType string
+
+const (
+	HostedControlPlaneAvailable ConditionType = "Available"
+	HostedControlPlaneDegraded  ConditionType = "Degraded"
+	EtcdSnapshotRestored        ConditionType = "EtcdSnapshotRestored"
+	CVOScaledDown               ConditionType = "CVOScaledDown"
+)
+
+// HostedControlPlaneStatus defines the observed state of HostedControlPlane
+type HostedControlPlaneStatus struct {
+	// Ready denotes that the HostedControlPlane API Server is ready to
+	// receive requests.
+	// This satisfies CAPI contract https://github.com/kubernetes-sigs/cluster-api/blob/cd3a694deac89d5ebeb888307deaa61487207aa0/controllers/cluster_controller_phases.go#L226-L230
+	// +kubebuilder:validation:Required
+	// +kubebuilder:default=false
+	Ready bool `json:"ready"`
+
+	// Initialized denotes whether or not the control plane has
+	// provided a kubeadm-config.
+	// Once this condition is marked true, its value is never changed. See the Ready condition for an indication of
+	// the current readiness of the cluster's control plane.
+	// This satisfies CAPI contract https://github.com/kubernetes-sigs/cluster-api/blob/cd3a694deac89d5ebeb888307deaa61487207aa0/controllers/cluster_controller_phases.go#L238-L252
+	// +kubebuilder:validation:Required
+	// +kubebuilder:default=false
+	Initialized bool `json:"initialized"`
+
+	// ExternalManagedControlPlane indicates to cluster-api that the control plane
+	// is managed by an external service.
+	// https://github.com/kubernetes-sigs/cluster-api/blob/65e5385bffd71bf4aad3cf34a537f11b217c7fab/controllers/machine_controller.go#L468
+	// +kubebuilder:default=true
+	ExternalManagedControlPlane *bool `json:"externalManagedControlPlane,omitempty"`
+
+	// ControlPlaneEndpoint contains the endpoint information by which
+	// external clients can access the control plane. This is populated
+	// after the infrastructure is ready.
+	// +kubebuilder:validation:Optional
+	ControlPlaneEndpoint APIEndpoint `json:"controlPlaneEndpoint,omitempty"`
+
+	// OAuthCallbackURLTemplate contains a template for the URL to use as a callback
+	// for identity providers. The [identity-provider-name] placeholder must be replaced
+	// with the name of an identity provider defined on the HostedCluster.
+	// This is populated after the infrastructure is ready.
+	// +kubebuilder:validation:Optional
+	OAuthCallbackURLTemplate string `json:"oauthCallbackURLTemplate,omitempty"`
+
+	// versionStatus is the status of the release version applied by the
+	// hosted control plane operator.
+	// +optional
+	VersionStatus *ClusterVersionStatus `json:"versionStatus,omitempty"`
+
+	// Version is the semantic version of the release applied by
+	// the hosted control plane operator.
+	//
+	// Deprecated: Use versionStatus.desired.version instead.
+	// +kubebuilder:validation:Optional
+	Version string `json:"version,omitempty"`
+
+	// ReleaseImage is the release image applied to the hosted control plane.
+	//
+	// Deprecated: Use versionStatus.desired.image instead.
+	// +optional
+	ReleaseImage string `json:"releaseImage,omitempty"`
+
+	// lastReleaseImageTransitionTime is the time of the last update to the current
+	// releaseImage property.
+	//
+	// Deprecated: Use versionStatus.history[0].startedTime instead.
+	// +kubebuilder:validation:Optional
+	LastReleaseImageTransitionTime *metav1.Time `json:"lastReleaseImageTransitionTime,omitempty"`
+
+	// KubeConfig is a reference to the secret containing the default kubeconfig
+	// for this control plane.
+	KubeConfig *KubeconfigSecretRef `json:"kubeConfig,omitempty"`
+
+	// KubeadminPassword is a reference to the secret containing the initial kubeadmin password
+	// for the guest cluster.
+	// +optional
+	KubeadminPassword *corev1.LocalObjectReference `json:"kubeadminPassword,omitempty"`
+
+	// Conditions contains details for aspects of the current state of the HostedControlPlane.
+	// Current condition types are: "Available"
+	// +optional
+	// +listType=map
+	// +listMapKey=type
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
+
+	// Platform contains platform-specific status of the HostedCluster
+	// +optional
+	Platform *PlatformStatus `json:"platform,omitempty"`
+
+	// NodeCount tracks the number of nodes in the HostedControlPlane.
+	// +optional
+	NodeCount *int `json:"nodeCount,omitempty"`
+}
+
+type APIEndpoint struct {
+	// Host is the hostname on which the API server is serving.
+	Host string `json:"host"`
+
+	// Port is the port on which the API server is serving.
+	Port int32 `json:"port"`
+}
+
+// HostedControlPlaneList contains a list of HostedControlPlanes.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type HostedControlPlaneList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []HostedControlPlane `json:"items"`
+}
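Since HostedControlPlaneStatus exposes standard metav1.Conditions, callers can lean on the apimachinery condition helpers rather than scanning the slice by hand. A minimal sketch (the helper function and hyperv1 alias are ours, not part of the vendored API):

package main

import (
	"fmt"

	hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
	"k8s.io/apimachinery/pkg/api/meta"
)

// isControlPlaneReady reports whether the Available condition is True.
func isControlPlaneReady(hcp *hyperv1.HostedControlPlane) bool {
	return meta.IsStatusConditionTrue(hcp.Status.Conditions, string(hyperv1.HostedControlPlaneAvailable))
}

func main() {
	hcp := &hyperv1.HostedControlPlane{}
	fmt.Println(isControlPlaneReady(hcp)) // false until Available=True is set
}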
diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hostedcluster_conditions.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hostedcluster_conditions.go
new file mode 100644
index 000000000..c6b876ab3
--- /dev/null
+++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hostedcluster_conditions.go
@@ -0,0 +1,250 @@
+package v1beta1
+
+// "Condition values may change back and forth, but some condition transitions may be monotonic, depending on the resource and condition type.
+// However, conditions are observations and not, themselves, state machines."
+// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties
+
+// Conditions.
+const (
+	// HostedClusterAvailable indicates whether the HostedCluster has a healthy
+	// control plane.
+	// When this is false for too long and there's no clear indication in the "Reason", please check the remaining more granular conditions.
+	HostedClusterAvailable ConditionType = "Available"
+	// HostedClusterProgressing indicates whether the HostedCluster is attempting
+	// an initial deployment or upgrade.
+	// When this is false for too long and there's no clear indication in the "Reason", please check the remaining more granular conditions.
+	HostedClusterProgressing ConditionType = "Progressing"
+	// HostedClusterDegraded indicates whether the HostedCluster is encountering
+	// an error that may require user intervention to resolve.
+	HostedClusterDegraded ConditionType = "Degraded"
+
+	// Bubble up from HCP.
+
+	// InfrastructureReady bubbles up the same condition from HCP. It signals whether the infrastructure for the control plane is operational,
+	// e.g. load balancers were created successfully.
+	// A failure here may require external user intervention to resolve. E.g. hitting quotas on the cloud provider.
+	InfrastructureReady ConditionType = "InfrastructureReady"
+	// KubeAPIServerAvailable bubbles up the same condition from HCP. It signals if the kube API server is available.
+	// A failure here often means a software bug or a non-stable cluster.
+	KubeAPIServerAvailable ConditionType = "KubeAPIServerAvailable"
+	// EtcdAvailable bubbles up the same condition from HCP. It signals if etcd is available.
+	// A failure here often means a software bug or a non-stable cluster.
+	EtcdAvailable ConditionType = "EtcdAvailable"
+	// ValidHostedControlPlaneConfiguration bubbles up the same condition from HCP. It signals if the hostedControlPlane input is valid and
+	// supported by the underlying management cluster.
+	// A failure here is unlikely to resolve without changing the user input.
+	ValidHostedControlPlaneConfiguration ConditionType = "ValidHostedControlPlaneConfiguration"
+	// CloudResourcesDestroyed bubbles up the same condition from HCP. It signals if the cloud provider infrastructure created by Kubernetes
+	// in the consumer cloud provider account was destroyed.
+	// A failure here may require external user intervention to resolve. E.g. cloud provider perms were corrupted. E.g. the guest cluster was broken
+	// and kube resource deletion that affects cloud infra like service type load balancer can't succeed.
+	CloudResourcesDestroyed ConditionType = "CloudResourcesDestroyed"
+	// HostedClusterDestroyed indicates that a hosted cluster has finished destroying and that it is waiting for a destroy grace period to go away.
+	// The grace period is determined by the hypershift.openshift.io/destroy-grace-period annotation in the HostedCluster if present.
+	HostedClusterDestroyed ConditionType = "HostedClusterDestroyed"
+	// ExternalDNSReachable bubbles up the same condition from HCP. It signals if the configured external DNS is reachable.
+	// A failure here requires external user intervention to resolve. E.g. changing the external DNS domain or making sure the domain is created
+	// and registered correctly.
+	ExternalDNSReachable ConditionType = "ExternalDNSReachable"
+	// ValidReleaseInfo bubbles up the same condition from HCP. It indicates if the release contains all the images used by hypershift
+	// and reports missing images if any.
+	ValidReleaseInfo ConditionType = "ValidReleaseInfo"
+
+	// Bubble up from HCP which bubbles up from CVO.
+
+	// ClusterVersionSucceeding indicates the current status of the desired release
+	// version of the HostedCluster as indicated by the Failing condition in the
+	// underlying cluster's ClusterVersion.
+	ClusterVersionSucceeding ConditionType = "ClusterVersionSucceeding"
+	// ClusterVersionUpgradeable indicates the Upgradeable condition in the
+	// underlying cluster's ClusterVersion.
+	ClusterVersionUpgradeable ConditionType = "ClusterVersionUpgradeable"
+	// ClusterVersionFailing bubbles up Failing from the CVO.
+	ClusterVersionFailing ConditionType = "ClusterVersionFailing"
+	// ClusterVersionProgressing bubbles up configv1.OperatorProgressing from the CVO.
+	ClusterVersionProgressing ConditionType = "ClusterVersionProgressing"
+	// ClusterVersionAvailable bubbles up configv1.OperatorAvailable from the CVO.
+	ClusterVersionAvailable ConditionType = "ClusterVersionAvailable"
+	// ClusterVersionReleaseAccepted bubbles up ReleaseAccepted from the CVO.
+	ClusterVersionReleaseAccepted ConditionType = "ClusterVersionReleaseAccepted"
+	// ClusterVersionRetrievedUpdates bubbles up RetrievedUpdates from the CVO.
+	ClusterVersionRetrievedUpdates ConditionType = "ClusterVersionRetrievedUpdates"
+
+	// UnmanagedEtcdAvailable indicates whether a user-managed etcd cluster is
+	// healthy.
+	UnmanagedEtcdAvailable ConditionType = "UnmanagedEtcdAvailable"
+
+	// IgnitionEndpointAvailable indicates whether the ignition server for the
+	// HostedCluster is available to handle ignition requests.
+	// A failure here often means a software bug or a non-stable cluster.
+	IgnitionEndpointAvailable ConditionType = "IgnitionEndpointAvailable"
+
+	// IgnitionServerValidReleaseInfo indicates if the release contains all the images used by the local ignition provider
+	// and reports missing images if any.
+	IgnitionServerValidReleaseInfo ConditionType = "IgnitionServerValidReleaseInfo"
+
+	// ValidHostedClusterConfiguration signals if the hostedCluster input is valid and
+	// supported by the underlying management cluster.
+	// A failure here is unlikely to resolve without changing the user input.
+	ValidHostedClusterConfiguration ConditionType = "ValidConfiguration"
+
+	// SupportedHostedCluster indicates whether a HostedCluster is supported by
+	// the current configuration of the hypershift-operator.
+	// e.g. If HostedCluster requests endpointAccess Private but the hypershift-operator
+	// is running on a management cluster outside AWS or is not configured with AWS
+	// credentials, the HostedCluster is not supported.
+	// A failure here is unlikely to resolve without changing the user input.
+	SupportedHostedCluster ConditionType = "SupportedHostedCluster"
+
+	// ValidOIDCConfiguration indicates if an AWS cluster's OIDC condition is
+	// detected as invalid.
+	// A failure here may require external user intervention to resolve. E.g. oidc was deleted out of band.
+	ValidOIDCConfiguration ConditionType = "ValidOIDCConfiguration"
+
+	// ValidIDPConfiguration indicates if the Identity Provider configuration is valid.
+	// A failure here may require external user intervention to resolve
+	// e.g. the user-provided IDP configuration provided is invalid or the IDP is not reachable.
+	ValidIDPConfiguration ConditionType = "ValidIDPConfiguration"
+
+	// ValidReleaseImage indicates if the release image set in the spec is valid
+	// for the HostedCluster. For example, this can be set false if the
+	// HostedCluster itself attempts an unsupported version before 4.9 or an
+	// unsupported upgrade, e.g. a y-stream upgrade before 4.11.
+	// A failure here is unlikely to resolve without changing the user input.
+	ValidReleaseImage ConditionType = "ValidReleaseImage"
+
+	// ValidKubeVirtInfraNetworkMTU indicates if the MTU configured on an infra cluster
+	// hosting a guest cluster utilizing kubevirt platform is a sufficient value that will avoid
+	// performance degradation due to fragmentation of the double encapsulation in ovn-kubernetes
+	ValidKubeVirtInfraNetworkMTU ConditionType = "ValidKubeVirtInfraNetworkMTU"
+
+	// KubeVirtNodesLiveMigratable indicates if all nodes (VirtualMachines) of the kubevirt
+	// hosted cluster can be live migrated without experiencing a node restart
+	KubeVirtNodesLiveMigratable ConditionType = "KubeVirtNodesLiveMigratable"
+
+	// ValidAWSIdentityProvider indicates if the Identity Provider referenced
+	// in the cloud credentials is healthy. E.g. for AWS the idp ARN is referenced in the iam roles.
+	// "Version": "2012-10-17",
+	// "Statement": [
+	//   {
+	//     "Effect": "Allow",
+	//     "Principal": {
+	//       "Federated": "{{ .ProviderARN }}"
+	//     },
+	//     "Action": "sts:AssumeRoleWithWebIdentity",
+	//     "Condition": {
+	//       "StringEquals": {
+	//         "{{ .ProviderName }}:sub": {{ .ServiceAccounts }}
+	//       }
+	//     }
+	//   }
+	// ]
+	//
+	// A failure here may require external user intervention to resolve.
+	ValidAWSIdentityProvider ConditionType = "ValidAWSIdentityProvider"
+
+	// ValidAWSKMSConfig indicates whether the AWS KMS role and encryption key are valid and operational
+	// A failure here indicates that the role or the key are invalid, or the role doesn't have access to use the key.
+	ValidAWSKMSConfig ConditionType = "ValidAWSKMSConfig"
+
+	// ValidAzureKMSConfig indicates whether the given KMS input for the Azure platform is valid and operational
+	// A failure here indicates that the input is invalid, or permissions are missing to use the encryption key.
+	ValidAzureKMSConfig ConditionType = "ValidAzureKMSConfig"
+
+	// AWSDefaultSecurityGroupCreated indicates whether the default security group
+	// for AWS workers has been created.
+	// A failure here indicates that NodePools without a security group will be
+	// blocked from creating machines.
+	AWSDefaultSecurityGroupCreated ConditionType = "AWSDefaultSecurityGroupCreated"
+
+	// AWSDefaultSecurityGroupDeleted indicates whether the default security group
+	// for AWS workers has been deleted.
+	// A failure here indicates that the security group still has dependencies:
+	// there are pending cloud resources still using that SG which must be deleted first.
+	AWSDefaultSecurityGroupDeleted ConditionType = "AWSDefaultSecurityGroupDeleted"
+
+	// PlatformCredentialsFound indicates that credentials required for the
+	// desired platform are valid.
+	// A failure here is unlikely to resolve without changing the user input.
+	PlatformCredentialsFound ConditionType = "PlatformCredentialsFound"
+
+	// ReconciliationActive indicates if reconciliation of the HostedCluster is
+	// active or paused via hostedCluster.spec.pausedUntil.
+	ReconciliationActive ConditionType = "ReconciliationActive"
+	// ReconciliationSucceeded indicates if the HostedCluster reconciliation
+	// succeeded.
+	// A failure here often means a software bug or a non-stable cluster.
+	ReconciliationSucceeded ConditionType = "ReconciliationSucceeded"
+
+	// EtcdRecoveryActive indicates that the Etcd cluster is failing and the
+	// recovery job was triggered.
+	EtcdRecoveryActive ConditionType = "EtcdRecoveryActive"
+
+	// ClusterSizeComputed indicates that a t-shirt size was computed for this HostedCluster.
+	// The last transition time for this condition is used to manage how quickly transitions occur.
+	ClusterSizeComputed = "ClusterSizeComputed"
+	// ClusterSizeTransitionPending indicates that a t-shirt size transition is pending, but has
+	// not been applied yet. This may either be due to transition delays on the cluster itself
+	// or from management-cluster-wide limits to transition throughput.
+	ClusterSizeTransitionPending = "ClusterSizeTransitionPending"
+	// ClusterSizeTransitionRequired exposes the next t-shirt size that the cluster will transition to.
+	ClusterSizeTransitionRequired = "ClusterSizeTransitionRequired"
+)
+
+// Reasons.
+const (
+	StatusUnknownReason         = "StatusUnknown"
+	AsExpectedReason            = "AsExpected"
+	NotFoundReason              = "NotFound"
+	WaitingForAvailableReason   = "WaitingForAvailable"
+	SecretNotFoundReason        = "SecretNotFound"
+	WaitingForGracePeriodReason = "WaitingForGracePeriod"
+	BlockedReason               = "Blocked"
+
+	InfraStatusFailureReason           = "InfraStatusFailure"
+	WaitingOnInfrastructureReadyReason = "WaitingOnInfrastructureReady"
+
+	EtcdQuorumAvailableReason     = "QuorumAvailable"
+	EtcdWaitingForQuorumReason    = "EtcdWaitingForQuorum"
+	EtcdStatefulSetNotFoundReason = "StatefulSetNotFound"
+	EtcdRecoveryJobFailedReason   = "EtcdRecoveryJobFailed"
+
+	UnmanagedEtcdMisconfiguredReason = "UnmanagedEtcdMisconfigured"
+	UnmanagedEtcdAsExpected          = "UnmanagedEtcdAsExpected"
+
+	FromClusterVersionReason = "FromClusterVersion"
+
+	InvalidConfigurationReason            = "InvalidConfiguration"
+	KubeconfigWaitingForCreateReason      = "KubeconfigWaitingForCreate"
+	UnsupportedHostedClusterReason        = "UnsupportedHostedCluster"
+	InsufficientClusterCapabilitiesReason = "InsufficientClusterCapabilities"
+	OIDCConfigurationInvalidReason        = "OIDCConfigurationInvalid"
+	PlatformCredentialsNotFoundReason     = "PlatformCredentialsNotFound"
+	InvalidImageReason                    = "InvalidImage"
+	InvalidIdentityProvider               = "InvalidIdentityProvider"
+	PayloadArchNotFoundReason             = "PayloadArchNotFound"
+
+	InvalidIAMRoleReason = "InvalidIAMRole"
+
+	InvalidAzureCredentialsReason = "InvalidAzureCredentials"
+	AzureErrorReason              = "AzureError"
+
+	ExternalDNSHostNotReachableReason = "ExternalDNSHostNotReachable"
+
+	KASLoadBalancerNotReachableReason = "KASLoadBalancerNotReachable"
+
+	MissingReleaseImagesReason = "MissingReleaseImages"
+
+	ReconciliationPausedConditionReason             = "ReconciliationPaused"
+	ReconciliationInvalidPausedUntilConditionReason = "InvalidPausedUntilValue"
+
+	KubeVirtSuboptimalMTUReason = "KubeVirtSuboptimalMTUDetected"
+
+	KubeVirtNodesLiveMigratableReason = "KubeVirtNodesNotLiveMigratable"
+)
+
+// Messages.
+const (
+	// AllIsWellMessage is the standard message.
+	AllIsWellMessage = "All is well"
+)
diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hostedcluster_types.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hostedcluster_types.go
new file mode 100644
index 000000000..8b15ec95c
--- /dev/null
+++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hostedcluster_types.go
@@ -0,0 +1,1570 @@
+package v1beta1
+
+import (
+	"fmt"
+	"strings"
+
+	configv1 "github.com/openshift/api/config/v1"
+	"github.com/openshift/hypershift/api/util/ipnet"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+func init() {
+	SchemeBuilder.Register(func(scheme *runtime.Scheme) error {
+		scheme.AddKnownTypes(SchemeGroupVersion,
+			&HostedCluster{},
+			&HostedClusterList{},
+		)
+		metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+
+		return nil
+	})
+}
+
+const (
+	// AuditWebhookKubeconfigKey is the key name in the AuditWebhook secret that stores audit webhook kubeconfig
+	AuditWebhookKubeconfigKey          = "webhook-kubeconfig"
+	DisablePKIReconciliationAnnotation = "hypershift.openshift.io/disable-pki-reconciliation"
+	// SkipReleaseImageValidation skips any release validation that the HO version might dictate for any HC and skips the min supported version check for NodePools.
+	SkipReleaseImageValidation                = "hypershift.openshift.io/skip-release-image-validation"
+	IdentityProviderOverridesAnnotationPrefix = "idpoverrides.hypershift.openshift.io/"
+	OauthLoginURLOverrideAnnotation           = "oauth.hypershift.openshift.io/login-url-override"
+	// HCDestroyGracePeriodAnnotation is an annotation which will delay the removal of the HostedCluster finalizer to allow consumers to read the status of the HostedCluster
+	// before the resource goes away. The format of the annotation is a go duration string with a numeric component and unit.
+	// sample: hypershift.openshift.io/destroy-grace-period: "600s"
+	HCDestroyGracePeriodAnnotation = "hypershift.openshift.io/destroy-grace-period"
+	// ControlPlanePriorityClass is for pods in the HyperShift Control Plane that are not API critical but still need elevated priority. E.g. Cluster Version Operator.
+	ControlPlanePriorityClass = "hypershift.openshift.io/control-plane-priority-class"
+	// APICriticalPriorityClass is for pods that are required for API calls and resource admission to succeed. This includes pods like kube-apiserver, aggregated API servers, and webhooks.
+	APICriticalPriorityClass = "hypershift.openshift.io/api-critical-priority-class"
+	// EtcdPriorityClass is for etcd pods.
+	EtcdPriorityClass = "hypershift.openshift.io/etcd-priority-class"
+	// KonnectivityServerImageAnnotation is a temporary annotation that allows the specification of the konnectivity server image.
+	// This will be removed when Konnectivity is added to the OpenShift release payload
+	KonnectivityServerImageAnnotation = "hypershift.openshift.io/konnectivity-server-image"
+	// KonnectivityAgentImageAnnotation is a temporary annotation that allows the specification of the konnectivity agent image.
+	// This will be removed when Konnectivity is added to the OpenShift release payload
+	KonnectivityAgentImageAnnotation = "hypershift.openshift.io/konnectivity-agent-image"
+	// ControlPlaneOperatorImageAnnotation is an annotation that allows the specification of the control plane operator image.
+	// This is used for development and e2e workflows
+	ControlPlaneOperatorImageAnnotation = "hypershift.openshift.io/control-plane-operator-image"
+	// ControlPlaneOperatorImageLabelsAnnotation is an annotation that allows the specification of the control plane operator image labels.
+	// Labels are provided in a comma-delimited format: key=value,key2=value2
+	// This is used for development and e2e workflows
+	ControlPlaneOperatorImageLabelsAnnotation = "hypershift.openshift.io/control-plane-operator-image-labels"
+	// RestartDateAnnotation is an annotation that can be used to trigger a rolling restart of all components managed by hypershift.
+	// It is important in some situations like CA rotation where components need to be fully restarted to pick up new CAs. It's also
+	// important in some recovery situations where a fresh start of the component helps fix symptoms a user might be experiencing.
+	RestartDateAnnotation = "hypershift.openshift.io/restart-date"
+	// ReleaseImageAnnotation is an annotation that can be used to see what release image a given deployment is tied to
+	ReleaseImageAnnotation = "hypershift.openshift.io/release-image"
+	// ClusterAPIManagerImage is an annotation that allows the specification of the cluster api manager image.
+	// This is a temporary workaround necessary for compliance reasons on the IBM Cloud side:
+	// no images can be pulled from registries outside of IBM Cloud's official regional registries
+	ClusterAPIManagerImage = "hypershift.openshift.io/capi-manager-image"
+	// ClusterAutoscalerImage is an annotation that allows the specification of the cluster autoscaler image.
+	// This is a temporary workaround necessary for compliance reasons on the IBM Cloud side:
+	// no images can be pulled from registries outside of IBM Cloud's official regional registries
+	ClusterAutoscalerImage = "hypershift.openshift.io/cluster-autoscaler-image"
+	// AWSKMSProviderImage is an annotation that allows the specification of the AWS kms provider image.
+	// Upstream code located at: https://github.com/kubernetes-sigs/aws-encryption-provider
+	AWSKMSProviderImage = "hypershift.openshift.io/aws-kms-provider-image"
+	// IBMCloudKMSProviderImage is an annotation that allows the specification of the IBM Cloud kms provider image.
+	IBMCloudKMSProviderImage = "hypershift.openshift.io/ibmcloud-kms-provider-image"
+	// PortierisImageAnnotation is an annotation that allows the specification of the portieris component
+	// (performs container image verification).
+	PortierisImageAnnotation = "hypershift.openshift.io/portieris-image"
+	// PrivateIngressControllerAnnotation is an annotation that configures the ingress controller with endpoint publishing strategy as Private.
+	// This overrides any opinionated strategy set by the platform in ReconcileDefaultIngressController.
+	// It's used by IBM Cloud to support ingress endpoint publishing strategy scope.
+	// NOTE: We'll expose this in the API if the use case gets generalised.
+	PrivateIngressControllerAnnotation = "hypershift.openshift.io/private-ingress-controller"
+	// IngressControllerLoadBalancerScope is an annotation that allows the specification of the LoadBalancer scope for the ingress controller.
+	IngressControllerLoadBalancerScope = "hypershift.openshift.io/ingress-controller-load-balancer-scope"
+
+	// CertifiedOperatorsCatalogImageAnnotation, CommunityOperatorsCatalogImageAnnotation, RedHatMarketplaceCatalogImageAnnotation and RedHatOperatorsCatalogImageAnnotation
+	// are annotations that can be used to override the address of the images used for the OLM catalogs when in the `management` OLMCatalogPlacement mode.
+	// If used, all of them should be set at the same time, referring to images only by digest (`...@sha256:`).
+	// This will disable the imagestream used to keep the catalog images up to date.
+	CertifiedOperatorsCatalogImageAnnotation = "hypershift.openshift.io/certified-operators-catalog-image"
+	CommunityOperatorsCatalogImageAnnotation = "hypershift.openshift.io/community-operators-catalog-image"
+	RedHatMarketplaceCatalogImageAnnotation = "hypershift.openshift.io/redhat-marketplace-catalog-image"
+	RedHatOperatorsCatalogImageAnnotation = "hypershift.openshift.io/redhat-operators-catalog-image"
+
+	// OLMCatalogsISRegistryOverridesAnnotation overrides the image registries used for the ImageStream used for the OLM catalogs.
+	// It contains the source registry string as a key and the destination registry string as value.
+	// Images, before being applied, are scanned for the source registry string and, if found, the string is replaced with the destination registry string.
+	// Format is: "sr1=dr1,sr2=dr2"
+	OLMCatalogsISRegistryOverridesAnnotation = "hypershift.openshift.io/olm-catalogs-is-registry-overrides"
+
+	// ClusterAPIProviderAWSImage overrides the CAPI AWS provider image to use for
+	// a HostedControlPlane.
+	ClusterAPIProviderAWSImage = "hypershift.openshift.io/capi-provider-aws-image"
+
+	// ClusterAPIKubeVirtProviderImage overrides the CAPI KubeVirt provider image to use for
+	// a HostedControlPlane.
+	ClusterAPIKubeVirtProviderImage = "hypershift.openshift.io/capi-provider-kubevirt-image"
+
+	// ClusterAPIAgentProviderImage overrides the CAPI Agent provider image to use for
+	// a HostedControlPlane.
+	ClusterAPIAgentProviderImage = "hypershift.openshift.io/capi-provider-agent-image"
+
+	// ClusterAPIAzureProviderImage overrides the CAPI Azure provider image to use for
+	// a HostedControlPlane.
+	ClusterAPIAzureProviderImage = "hypershift.openshift.io/capi-provider-azure-image"
+
+	// ClusterAPIPowerVSProviderImage overrides the CAPI PowerVS provider image to use for
+	// a HostedControlPlane.
+	ClusterAPIPowerVSProviderImage = "hypershift.openshift.io/capi-provider-powervs-image"
+
+	// ClusterAPIOpenStackProviderImage overrides the CAPI OpenStack provider image to use for
+	// a HostedControlPlane.
+	ClusterAPIOpenStackProviderImage = "hypershift.openshift.io/capi-provider-openstack-image"
+
+	// AESCBCKeySecretKey defines the Kubernetes secret key name that contains the aescbc encryption key
+	// in the AESCBC secret encryption strategy
+	AESCBCKeySecretKey = "key"
+	// IBMCloudIAMAPIKeySecretKey defines the Kubernetes secret key name that contains
+	// the customer IBMCloud apikey in the unmanaged authentication strategy for IBMCloud KMS secret encryption
+	IBMCloudIAMAPIKeySecretKey = "iam_apikey"
+	// AWSCredentialsFileSecretKey defines the Kubernetes secret key name that contains
+	// the customer AWS credentials in the unmanaged authentication strategy for AWS KMS secret encryption
+	AWSCredentialsFileSecretKey = "credentials"
+	// ControlPlaneComponentLabel identifies a resource as belonging to a hosted control plane.
+	ControlPlaneComponentLabel = "hypershift.openshift.io/control-plane-component"
+
+	// OperatorComponent identifies a component as belonging to the operator.
+	OperatorComponent = "hypershift.openshift.io/operator-component"
+	// MachineApproverImage is an annotation that allows the specification of the machine approver image.
+	// This is a temporary workaround necessary for compliance reasons on the IBM Cloud side:
+	// no images can be pulled from registries outside of IBM Cloud's official regional registries
+	MachineApproverImage = "hypershift.openshift.io/machine-approver-image"
+
+	// ExternalDNSHostnameAnnotation is the annotation external-dns uses to register DNS names for different HCP services.
+	ExternalDNSHostnameAnnotation = "external-dns.alpha.kubernetes.io/hostname"
+
+	// ForceUpgradeToAnnotation is the annotation that forces HostedCluster upgrade even if the underlying ClusterVersion
+	// is reporting it is not Upgradeable. The annotation value must be set to the release image being forced.
+	ForceUpgradeToAnnotation = "hypershift.openshift.io/force-upgrade-to"
+
+	// ServiceAccountSigningKeySecretKey is the name of the secret key that should contain the service account signing
+	// key if specified.
+	ServiceAccountSigningKeySecretKey = "key"
+
+	// DisableProfilingAnnotation is the annotation that allows disabling profiling for control plane components.
+	// Any components specified in this list will have profiling disabled. Profiling is disabled by default for etcd and konnectivity.
+	// Components this annotation can apply to: kube-scheduler, kube-controller-manager, kube-apiserver.
+	DisableProfilingAnnotation = "hypershift.openshift.io/disable-profiling"
+
+	// CleanupCloudResourcesAnnotation is an annotation that indicates whether a guest cluster's resources should be
+	// removed when deleting the corresponding HostedCluster. If set to "true", resources created on the cloud provider during the life
+	// of the cluster will be removed, including image registry storage, ingress DNS records, load balancers, and persistent storage.
+	CleanupCloudResourcesAnnotation = "hypershift.openshift.io/cleanup-cloud-resources"
+
+	// ResourceRequestOverrideAnnotationPrefix is a prefix for an annotation to override resource requests for a particular deployment/container
+	// in a hosted control plane. The format of the annotation is:
+	// resource-request-override.hypershift.openshift.io/[deployment-name].[container-name]: [resource-type-1]=[value1],[resource-type-2]=[value2],...
+	// For example, to override the memory and cpu request for the Kubernetes APIServer:
+	// resource-request-override.hypershift.openshift.io/kube-apiserver.kube-apiserver: memory=3Gi,cpu=2000m
+	ResourceRequestOverrideAnnotationPrefix = "resource-request-override.hypershift.openshift.io"
+
+	// LimitedSupportLabel is a label that can be used by consumers to indicate
+	// a cluster is somehow out of regular support policy.
+	// https://docs.openshift.com/rosa/rosa_architecture/rosa_policy_service_definition/rosa-service-definition.html#rosa-limited-support_rosa-service-definition.
+	LimitedSupportLabel = "api.openshift.com/limited-support"
+
+	// SilenceClusterAlertsLabel is a label that can be used by consumers to indicate
+	// alerts from a cluster can be silenced or ignored
+	SilenceClusterAlertsLabel = "hypershift.openshift.io/silence-cluster-alerts"
+
+	// KubeVirtInfraCredentialsSecretName is the name of the secret in the hosted control plane namespace containing the kubeconfig
+	// of an external infrastructure cluster for the kubevirt provider
+	KubeVirtInfraCredentialsSecretName = "kubevirt-infra-credentials"
+
+	// InfraIDLabel is a label that indicates the hosted cluster's infra id
+	// that the resource is associated with.
+	InfraIDLabel = "hypershift.openshift.io/infra-id"
+
+	// NodePoolNameLabel is a label that indicates the name of the node pool
+	// a resource is associated with
+	NodePoolNameLabel = "hypershift.openshift.io/nodepool-name"
+
+	// RouteVisibilityLabel is a label that can be used by external-dns to filter routes
+	// it should not consider for name registration
+	RouteVisibilityLabel = "hypershift.openshift.io/route-visibility"
+
+	// RouteVisibilityPrivate is a value for RouteVisibilityLabel that will result
+	// in the labeled route being ignored by external-dns
+	RouteVisibilityPrivate = "private"
+
+	// AllowUnsupportedKubeVirtRHCOSVariantsAnnotation allows a NodePool to use image sources
+	// other than the official rhcos kubevirt variant, such as the openstack variant. This
+	// allows the creation of guest clusters <= 4.13, which are before the rhcos kubevirt
+	// variant was released.
+	AllowUnsupportedKubeVirtRHCOSVariantsAnnotation = "hypershift.openshift.io/allow-unsupported-kubevirt-rhcos-variants"
+
+	// ImageOverridesAnnotation is passed as a flag to the CPO to allow overriding release images.
+	// The format of the annotation value is a comma-separated list of image=ref pairs like:
+	// cluster-network-operator=example.com/cno:latest,ovn-kubernetes=example.com/ovnkube:latest
+	ImageOverridesAnnotation = "hypershift.openshift.io/image-overrides"
+
+	// EnsureExistsPullSecretReconciliation enables a reconciliation behavior on in-cluster pull secret
+	// resources that enables user modifications to the resources while ensuring they do exist. This
+	// allows users to execute workflows like disabling the insights operator.
+	EnsureExistsPullSecretReconciliation = "hypershift.openshift.io/ensureexists-pullsecret-reconcile"
+
+	// HostedClusterLabel is used as a label on nodes that are dedicated to a specific hosted cluster
+	HostedClusterLabel = "hypershift.openshift.io/cluster"
+
+	// RequestServingComponentLabel is used as a label on pods and nodes for dedicated serving components.
+	RequestServingComponentLabel = "hypershift.openshift.io/request-serving-component"
+
+	// TopologyAnnotation indicates the type of topology that should take effect for the
+	// hosted cluster's control plane workloads. Currently the only value supported is "dedicated-request-serving-components".
+	// We implicitly support shared and dedicated.
+	TopologyAnnotation = "hypershift.openshift.io/topology"
+
+	// HostedClusterScheduledAnnotation indicates that a hosted cluster with dedicated request serving components
+	// has been assigned dedicated nodes. If not present, the hosted cluster needs scheduling.
+	HostedClusterScheduledAnnotation = "hypershift.openshift.io/cluster-scheduled"
+
+	// DedicatedRequestServingComponentsTopology indicates that control plane request serving
+	// components should be scheduled on dedicated nodes in the management cluster.
+	DedicatedRequestServingComponentsTopology = "dedicated-request-serving-components"
+
+	// RequestServingNodeAdditionalSelectorAnnotation is used to specify an additional node selector for
+	// request serving nodes. The value is a comma-separated list of key=value pairs.
+	RequestServingNodeAdditionalSelectorAnnotation = "hypershift.openshift.io/request-serving-node-additional-selector"
+
+	// DisableMachineManagement disables deployments related to machine management, which includes cluster-api, cluster-autoscaler, and machine-approver.
+	DisableMachineManagement = "hypershift.openshift.io/disable-machine-management"
+
+	// AllowGuestWebhooksServiceLabel marks a service deployed in the control plane as a valid target
+	// for validating/mutating webhooks running in the guest cluster.
+	AllowGuestWebhooksServiceLabel = "hypershift.openshift.io/allow-guest-webhooks"
+
+	// PodSecurityAdmissionLabelOverrideAnnotation allows overriding the pod security admission label on
+	// hosted control plane namespaces. The default is 'Restricted'. Valid values are 'Restricted', 'Baseline', or 'Privileged'.
+	// See https://github.com/openshift/enhancements/blob/master/enhancements/authentication/pod-security-admission.md
+	PodSecurityAdmissionLabelOverrideAnnotation = "hypershift.openshift.io/pod-security-admission-label-override"
+
+	// DisableMonitoringServices introduces an option to disable monitoring services that IBM Cloud does not use.
+	DisableMonitoringServices = "hypershift.openshift.io/disable-monitoring-services"
+
+	// JSONPatchAnnotation allows modifying the kubevirt VM template using jsonpatch
+	JSONPatchAnnotation = "hypershift.openshift.io/kubevirt-vm-jsonpatch"
+
+	// KubeAPIServerGOGCAnnotation allows modifying the kube-apiserver GOGC environment variable to impact how often
+	// the GO garbage collector runs. This can be used to reduce the memory footprint of the kube-apiserver.
+	KubeAPIServerGOGCAnnotation = "hypershift.openshift.io/kube-apiserver-gogc"
+
+	// KubeAPIServerGOMemoryLimitAnnotation allows modifying the kube-apiserver GOMEMLIMIT environment variable to increase
+	// the frequency of memory collection when memory used rises above a particular threshold. This can be used to reduce
+	// the memory footprint of the kube-apiserver during upgrades.
+	KubeAPIServerGOMemoryLimitAnnotation = "hypershift.openshift.io/kube-apiserver-gomemlimit"
+
+	// KubeAPIServerMaximumRequestsInFlight allows overriding the default value for the kube-apiserver max-requests-inflight
+	// flag. This allows controlling how many concurrent requests can be handled by the Kube API server at any given time.
+	KubeAPIServerMaximumRequestsInFlight = "hypershift.openshift.io/kube-apiserver-max-requests-inflight"
+
+	// KubeAPIServerMaximumMutatingRequestsInFlight allows overriding the default value for the kube-apiserver max-mutating-requests-inflight
+	// flag. This allows controlling how many mutating concurrent requests can be handled by the Kube API server at any given time.
+	KubeAPIServerMaximumMutatingRequestsInFlight = "hypershift.openshift.io/kube-apiserver-max-mutating-requests-inflight"
+
+	// AWSLoadBalancerSubnetsAnnotation allows specifying the subnets to use for control plane load balancers
+	// in the AWS platform.
+	AWSLoadBalancerSubnetsAnnotation = "hypershift.openshift.io/aws-load-balancer-subnets"
+
+	// DisableClusterAutoscalerAnnotation allows disabling the cluster autoscaler for a hosted cluster.
+	// This annotation is only set by the hypershift-operator on HostedControlPlanes.
+	// It is not set by the end-user.
+	DisableClusterAutoscalerAnnotation = "hypershift.openshift.io/disable-cluster-autoscaler"
+
+	// AroHCP represents the ARO HCP managed service offering
+	AroHCP = "ARO-HCP"
+
+	// HostedClusterSizeLabel is a label on HostedClusters indicating a size based on the number of nodes.
+	HostedClusterSizeLabel = "hypershift.openshift.io/hosted-cluster-size"
+
+	// NodeSizeLabel is a label on nodes used to match cluster size to a node size.
+	NodeSizeLabel = "hypershift.openshift.io/cluster-size"
+
+	// ManagementPlatformAnnotation specifies the infrastructure platform of the underlying management cluster
+	ManagementPlatformAnnotation = "hypershift.openshift.io/management-platform"
+
+	// MachineHealthCheckTimeoutAnnotation allows overriding the default machine health check timeout for
+	// nodepools. The annotation can be set in either the HostedCluster or the NodePool. If set on both, the
+	// one on the NodePool takes precedence. The value is a go duration string with a number and a unit (e.g. 8m, 1h).
+	MachineHealthCheckTimeoutAnnotation = "hypershift.openshift.io/machine-health-check-timeout"
+
+	// MachineHealthCheckNodeStartupTimeoutAnnotation allows overriding the default machine health check timeout for
+	// node startup on nodepools. The annotation can be set in either the HostedCluster or the NodePool. If set on both, the
+	// one on the NodePool takes precedence.
+	// The value is a go duration string with a number and a unit (e.g. 8m, 1h).
+	MachineHealthCheckNodeStartupTimeoutAnnotation = "hypershift.openshift.io/machine-health-check-node-startup-timeout"
+
+	// MachineHealthCheckMaxUnhealthyAnnotation allows overriding the max unhealthy value of the machine
+	// health check created for a NodePool. The annotation can be set in either the HostedCluster or the NodePool.
+	// If set on both, the one on the NodePool takes precedence. The value can be a number or a percentage value.
+	MachineHealthCheckMaxUnhealthyAnnotation = "hypershift.openshift.io/machine-health-check-max-unhealthy"
+
+	// ClusterSizeOverrideAnnotation allows overriding the value of the size label regardless of the number
+	// of workers associated with the HostedCluster. The value should be the desired size label.
+	ClusterSizeOverrideAnnotation = "hypershift.openshift.io/cluster-size-override"
+
+	// KubeAPIServerVerbosityLevelAnnotation allows specifying the log verbosity of kube-apiserver.
+	KubeAPIServerVerbosityLevelAnnotation = "hypershift.openshift.io/kube-apiserver-verbosity-level"
+
+	// NodePoolSupportsKubevirtTopologySpreadConstraintsAnnotation indicates if the NodePool currently supports
+	// using TopologySpreadConstraints on the KubeVirt VMs.
+	//
+	// Newer versions of the NodePool controller transitioned to spreading VMs across the cluster
+	// using TopologySpreadConstraints instead of Pod Anti-Affinity. When the new controller interacts
+	// with an older NodePool that was previously using pod anti-affinity, we don't want to immediately
+	// start using TopologySpreadConstraints because it will cause the MachineSet controller to update
+	// and replace all existing VMs. For example, it would be unexpected for a user to update the
+	// NodePool controller and for that to trigger a rolling update of all KubeVirt VMs.
+	//
+	// This annotation signals to the NodePool controller that it is safe to use TopologySpreadConstraints on a NodePool
+	// without triggering an unexpected update of KubeVirt VMs.
+	NodePoolSupportsKubevirtTopologySpreadConstraintsAnnotation = "hypershift.openshift.io/nodepool-supports-kubevirt-topology-spread-constraints"
+
+	// IsKubeVirtRHCOSVolumeLabelName labels rhcos DataVolumes and PVCs, to be able to filter them, e.g. for backup
+	IsKubeVirtRHCOSVolumeLabelName = "hypershift.openshift.io/is-kubevirt-rhcos"
+
+	// SkipControlPlaneNamespaceDeletionAnnotation tells the hosted cluster controller not to delete the hosted control plane
+	// namespace during hosted cluster deletion when this annotation is set to the value "true".
+	SkipControlPlaneNamespaceDeletionAnnotation = "hypershift.openshift.io/skip-delete-hosted-controlplane-namespace"
+
+	// DisableIgnitionServerAnnotation controls skipping of the ignition server deployment.
+	DisableIgnitionServerAnnotation = "hypershift.openshift.io/disable-ignition-server"
+
+	// ControlPlaneOperatorV2Annotation tells the hosted cluster to set the 'CPO_V2' env variable on the CPO deployment, which enables
+	// the new manifest based CPO implementation.
+	ControlPlaneOperatorV2Annotation = "hypershift.openshift.io/cpo-v2"
+
+	// ControlPlaneOperatorV2EnvVar, when set on the CPO deployment, enables the new manifest based CPO implementation.
+	ControlPlaneOperatorV2EnvVar = "CPO_V2"
+)
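// Editor's note: an illustrative sketch, not part of the patch. Several of
// the annotations above (destroy-grace-period, the machine-health-check
// timeouts) carry Go duration strings, so a consumer would parse them with
// time.ParseDuration; the helper name and the zero-value fallback below are
// hypothetical.
//
//	func destroyGracePeriod(hc *HostedCluster) (time.Duration, error) {
//		v, ok := hc.Annotations[HCDestroyGracePeriodAnnotation]
//		if !ok {
//			return 0, nil // annotation absent: no grace period requested
//		}
//		return time.ParseDuration(v) // e.g. "600s", per the doc comment above
//	}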
+
+// HostedClusterSpec is the desired behavior of a HostedCluster.
+//
+// +kubebuilder:validation:XValidation:rule=`self.platform.type != "IBMCloud" ? self.services == oldSelf.services : true`, message="Services is immutable. Changes might result in unpredictable and disruptive behavior."
+// +kubebuilder:validation:XValidation:rule=`self.platform.type == "Azure" ? self.services.exists(s, s.service == "APIServer" && s.servicePublishingStrategy.type == "Route" && s.servicePublishingStrategy.route.hostname != "") : true`,message="Azure platform requires APIServer Route service with a hostname to be defined"
+// +kubebuilder:validation:XValidation:rule=`self.platform.type == "Azure" ? self.services.exists(s, s.service == "OAuthServer" && s.servicePublishingStrategy.type == "Route" && s.servicePublishingStrategy.route.hostname != "") : true`,message="Azure platform requires OAuthServer Route service with a hostname to be defined"
+// +kubebuilder:validation:XValidation:rule=`self.platform.type == "Azure" ? self.services.exists(s, s.service == "Konnectivity" && s.servicePublishingStrategy.type == "Route" && s.servicePublishingStrategy.route.hostname != "") : true`,message="Azure platform requires Konnectivity Route service with a hostname to be defined"
+// +kubebuilder:validation:XValidation:rule=`self.platform.type == "Azure" ? self.services.exists(s, s.service == "Ignition" && s.servicePublishingStrategy.type == "Route" && s.servicePublishingStrategy.route.hostname != "") : true`,message="Azure platform requires Ignition Route service with a hostname to be defined"
+// +kubebuilder:validation:XValidation:rule=`has(self.issuerURL) || !has(self.serviceAccountSigningKey)`,message="If serviceAccountSigningKey is set, issuerURL must be set"
+type HostedClusterSpec struct {
+	// release specifies the desired OCP release payload for all the hosted cluster components.
+	// This includes those components running management side like the Kube API Server and the CVO, but also the operands which land in the hosted cluster data plane like the ingress controller, ovn agents, etc.
+	// The maximum and minimum supported release versions are determined by the running HyperShift Operator.
+	// Attempting to use an unsupported version will result in the HostedCluster being degraded and the validateReleaseImage condition being false.
+	// Attempting to use a release with a skew against a NodePool release bigger than N-2 for the y-stream will result in leaving the NodePool in an unsupported state.
+	// Changing this field will trigger a rollout of the control plane components.
+	// The behavior of the rollout will be driven by the ControllerAvailabilityPolicy and InfrastructureAvailabilityPolicy for PDBs and maxUnavailable and surge policies.
+	// +required
+	Release Release `json:"release"`
+
+	// controlPlaneRelease is like spec.release but only for the components running on the management cluster.
+	// This excludes any operand which will land in the hosted cluster data plane.
+	// It is useful when you need to apply a patch management side, like a CVE fix, transparently for the hosted cluster.
+	// Version input for this field is free form; no validation against spec.release or the maximum and minimum supported versions is performed.
+	// If defined, it will dictate the version of the components running management side, while spec.release will dictate the version of the components landing in the hosted cluster data plane.
+	// If not defined, spec.release is used for both.
+	// Changing this field will trigger a rollout of the control plane.
+	// The behavior of the rollout will be driven by the ControllerAvailabilityPolicy and InfrastructureAvailabilityPolicy for PDBs and maxUnavailable and surge policies.
+	// +optional
+	ControlPlaneRelease *Release `json:"controlPlaneRelease,omitempty"`
+
+	// clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal digits).
+	// As with a Kubernetes metadata.uid, this ID uniquely identifies this cluster in space and time.
+	// This value identifies the cluster in metrics pushed to telemetry and metrics produced by the control plane operators.
+	// If a value is not specified, a random clusterID will be generated and set by the controller.
+	// Once set, this value is immutable.
+	// +kubebuilder:validation:XValidation:rule="self.matches('[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}')",message="clusterID must be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal digits)"
+	// +kubebuilder:validation:XValidation:rule=`oldSelf == "" || self == oldSelf`, message="clusterID is immutable"
+	// +kubebuilder:validation:MaxLength=36
+	// +kubebuilder:validation:MinLength=36
+	// +optional
+	ClusterID string `json:"clusterID,omitempty"`
+
+	// infraID is a globally unique identifier for the cluster.
+	// It must consist of lowercase alphanumeric characters and hyphens ('-') only, and start and end with an alphanumeric character.
+	// It must be no more than 253 characters in length.
+	// This identifier will be used to associate various cloud resources with the HostedCluster and its associated NodePools.
+	// infraID is used to compute and tag created resources with "kubernetes.io/cluster/"+hcluster.Spec.InfraID which has contractual meaning for the cloud provider implementations.
+	// If a value is not specified, a random infraID will be generated and set by the controller.
+	// Once set, this value is immutable.
+	// +kubebuilder:validation:XValidation:rule="self.matches('^[a-z0-9]([-a-z0-9]*[a-z0-9])?$')",message="infraID must consist of lowercase alphanumeric characters or '-', start and end with an alphanumeric character, and be between 1 and 253 characters"
+	// +kubebuilder:validation:XValidation:rule=`oldSelf == "" || self == oldSelf`, message="infraID is immutable"
+	// +kubebuilder:validation:MaxLength=253
+	// +kubebuilder:validation:MinLength=1
+	// +optional
+	InfraID string `json:"infraID,omitempty"`
+
+	// updateService may be used to specify the preferred upstream update service.
+	// If omitted, we will use the appropriate update service for the cluster and region.
+	// This is used by the control plane operator to determine and signal the appropriate available upgrades in the hostedCluster.status.
+	// +kubebuilder:validation:XValidation:rule="isURL(self)",message="updateService must be a valid absolute URL"
+	// +optional
+	UpdateService configv1.URL `json:"updateService,omitempty"`
+
+	// channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster.
+	// If omitted, no particular upgrades are suggested.
+	// TODO(alberto): Consider the backend to use the default channel by default. The default channel will contain stable updates that are appropriate for production clusters.
+	// +kubebuilder:validation:MaxLength=100
+	// +kubebuilder:validation:MinLength=1
+	// +optional
+	Channel string `json:"channel,omitempty"`
+
+	// platform specifies the underlying infrastructure provider for the cluster
+	// and is used to configure platform specific behavior.
+	// +required
+	Platform PlatformSpec `json:"platform"`
+
+	// controllerAvailabilityPolicy specifies the availability policy applied to critical control plane components like the Kube API Server.
+	// Possible values are HighlyAvailable and SingleReplica. The default value is HighlyAvailable.
+	// +optional
+	// +kubebuilder:default:="HighlyAvailable"
+	ControllerAvailabilityPolicy AvailabilityPolicy `json:"controllerAvailabilityPolicy,omitempty"`
+
+	// infrastructureAvailabilityPolicy specifies the availability policy applied to infrastructure services which run on the hosted cluster data plane like the ingress controller and image registry controller.
+	// Possible values are HighlyAvailable and SingleReplica. The default value is SingleReplica.
+	// +optional
+	// +kubebuilder:default:="SingleReplica"
+	InfrastructureAvailabilityPolicy AvailabilityPolicy `json:"infrastructureAvailabilityPolicy,omitempty"`
+
+	// dns specifies the DNS configuration for the hosted cluster ingress.
+	// +optional
+	DNS DNSSpec `json:"dns,omitempty"`
+
+	// networking specifies network configuration for the hosted cluster.
+	// Defaults to OVNKubernetes with a cluster network of cidr: "10.132.0.0/14" and a service network of cidr: "172.31.0.0/16".
+	// +required
+	// +kubebuilder:default={networkType: "OVNKubernetes", clusterNetwork: {{cidr: "10.132.0.0/14"}}, serviceNetwork: {{cidr: "172.31.0.0/16"}}}
+	Networking ClusterNetworking `json:"networking"`
+
+	// autoscaling specifies auto-scaling behavior that applies to all NodePools
+	// associated with this HostedCluster.
+	//
+	// +optional
+	Autoscaling ClusterAutoscaling `json:"autoscaling,omitempty"`
+
+	// etcd specifies configuration for the control plane etcd cluster. The
+	// default managementType is Managed. Once set, the managementType cannot be
+	// changed.
+	//
+	// +kubebuilder:default={managementType: "Managed", managed: {storage: {type: "PersistentVolume", persistentVolume: {size: "8Gi"}}}}
+	// +required
+	// +immutable
+	Etcd EtcdSpec `json:"etcd"`
+
+	// services specifies how individual control plane service endpoints are published for consumption.
+	// This requires APIServer;OAuthServer;Konnectivity;Ignition.
+	// This field is immutable for all platforms but IBMCloud.
+	// Max is 6 to account for OIDC;OVNSbDb for backward compatibility, though they are no-ops.
+	//
+	// +kubebuilder:validation:MaxItems=6
+	// +kubebuilder:validation:MinItems=4
+	// +kubebuilder:validation:ListType=atomic
+	// +kubebuilder:validation:XValidation:rule="self.all(s, !(s.service == 'APIServer' && s.servicePublishingStrategy.type == 'Route') || has(s.servicePublishingStrategy.route.hostname))",message="If serviceType is 'APIServer' and publishing strategy is 'Route', then hostname must be set"
+	// +kubebuilder:validation:XValidation:rule="['APIServer', 'OAuthServer', 'Konnectivity', 'Ignition'].all(requiredType, self.exists(s, s.service == requiredType))",message="Services list must contain at least 'APIServer', 'OAuthServer', 'Konnectivity', and 'Ignition' service types"
+	// +kubebuilder:validation:XValidation:rule="self.filter(s, s.servicePublishingStrategy.type == 'Route' && has(s.servicePublishingStrategy.route) && has(s.servicePublishingStrategy.route.hostname)).all(x, self.filter(y, y.servicePublishingStrategy.type == 'Route' && (has(y.servicePublishingStrategy.route) && has(y.servicePublishingStrategy.route.hostname) && y.servicePublishingStrategy.route.hostname == x.servicePublishingStrategy.route.hostname)).size() <= 1)",message="Each route publishingStrategy 'hostname' must be unique within the Services list."
+	// +kubebuilder:validation:XValidation:rule="self.filter(s, s.servicePublishingStrategy.type == 'NodePort' && has(s.servicePublishingStrategy.nodePort) && has(s.servicePublishingStrategy.nodePort.address) && has(s.servicePublishingStrategy.nodePort.port)).all(x, self.filter(y, y.servicePublishingStrategy.type == 'NodePort' && (has(y.servicePublishingStrategy.nodePort) && has(y.servicePublishingStrategy.nodePort.address) && y.servicePublishingStrategy.nodePort.address == x.servicePublishingStrategy.nodePort.address && has(y.servicePublishingStrategy.nodePort.port) && y.servicePublishingStrategy.nodePort.port == x.servicePublishingStrategy.nodePort.port )).size() <= 1)",message="Each nodePort publishingStrategy 'nodePort' and 'hostname' must be unique within the Services list."
+	// +required
+	// +immutable
+	Services []ServicePublishingStrategyMapping `json:"services"`
+
+	// pullSecret is a local reference to a Secret that must have a ".dockerconfigjson" key whose content must be a valid OpenShift pull secret JSON.
+	// If the reference is set but none of the above requirements are met, the HostedCluster will enter a degraded state.
+	// TODO(alberto): Signal this in a condition.
+	// This pull secret will be part of every payload generated by the controllers for any NodePool of the HostedCluster
+	// and it will be injected into the container runtime of all NodePools.
+	// Changing this value will trigger a rollout for all existing NodePools in the cluster.
+	// Changing the content of the secret in place will not trigger a rollout and might result in unpredictable behaviour.
+	// +required
+	// +rollout
+	// TODO(alberto): have our own local reference type to include our opinions and avoid transparent changes.
+	PullSecret corev1.LocalObjectReference `json:"pullSecret"`
+
+	// sshKey is a local reference to a Secret that must have an "id_rsa.pub" key whose content must be the public part of 1..N SSH keys.
+	// If the reference is set but none of the above requirements are met, the HostedCluster will enter a degraded state.
+	// TODO(alberto): Signal this in a condition.
+	// When sshKey is set, the controllers will generate a machineConfig with the sshAuthorizedKeys https://coreos.github.io/ignition/configuration-v3_2/ populated with this value.
+	// This MachineConfig will be part of every payload generated by the controllers for any NodePool of the HostedCluster.
+	// Changing this value will trigger a rollout for all existing NodePools in the cluster.
+	// +rollout
+	// +optional
+	SSHKey corev1.LocalObjectReference `json:"sshKey"`
+
+	// issuerURL is an OIDC issuer URL which will be used as the issuer in all
+	// ServiceAccount tokens generated by the control plane API server via the --service-account-issuer kube-apiserver flag.
+	// https://k8s-docs.netlify.app/en/docs/reference/command-line-tools-reference/kube-apiserver/
+	// https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#serviceaccount-token-volume-projection
+	// The default value is kubernetes.default.svc, which only works for in-cluster
+	// validation.
+	// If the platform is AWS and this value is set, the controller will update an S3 object with the appropriate OIDC documents (using the serviceAccountSigningKey info) at that issuerURL.
+	// The expectation is for this S3 URL to be backed by an OIDC provider in AWS IAM.
+	// +kubebuilder:default:="https://kubernetes.default.svc"
+	// +immutable
+	// +optional
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="issuerURL is immutable"
+	// +kubebuilder:validation:XValidation:rule="isURL(self)",message="issuerURL must be a valid absolute URL"
+	IssuerURL string `json:"issuerURL,omitempty"`
+
+	// serviceAccountSigningKey is a local reference to a secret that must have a "key" key whose content must be the private key
+	// used by the service account token issuer.
+	// If not specified, a service account signing key will
+	// be generated automatically for the cluster.
+	// When specifying a service account signing key, an IssuerURL must also be specified.
+	// If the reference is set but none of the above requirements are met, the HostedCluster will enter a degraded state.
+	// TODO(alberto): Signal this in a condition.
+	//
+	// +immutable
+	// +optional
+	ServiceAccountSigningKey *corev1.LocalObjectReference `json:"serviceAccountSigningKey,omitempty"`
+
+	// Configuration specifies configuration for individual OCP components in the
+	// cluster, represented as embedded resources that correspond to the openshift
+	// configuration API.
+	//
+	// +optional
+	Configuration *ClusterConfiguration `json:"configuration,omitempty"`
+
+	// AuditWebhook contains metadata for configuring an audit webhook endpoint
+	// for a cluster to process cluster audit events. It references a secret that
+	// contains the webhook information for the audit webhook endpoint. It is a
+	// secret because if the endpoint has mTLS the kubeconfig will contain client
+	// keys. The kubeconfig needs to be stored in the secret with a secret key
+	// name that corresponds to the constant AuditWebhookKubeconfigKey.
+	//
+	// +optional
+	// +immutable
+	AuditWebhook *corev1.LocalObjectReference `json:"auditWebhook,omitempty"`
+
+	// imageContentSources specifies image mirrors that can be used by cluster
+	// nodes to pull content.
+	// When imageContentSources is set, the controllers will generate a machineConfig.
+	// This MachineConfig will be part of every payload generated by the controllers for any NodePool of the HostedCluster.
+	// Changing this value will trigger a rollout for all existing NodePools in the cluster.
+	// +optional
+	ImageContentSources []ImageContentSource `json:"imageContentSources,omitempty"`
+
+	// additionalTrustBundle is a local reference to a ConfigMap that must have a "ca-bundle.crt" key
+	// whose content must be a PEM-encoded X.509 certificate bundle that will be added to the hosted control plane and nodes.
+	// If the reference is set but none of the above requirements are met, the HostedCluster will enter a degraded state.
+	// TODO(alberto): Signal this in a condition.
+	// This will be part of every payload generated by the controllers for any NodePool of the HostedCluster.
+	// Changing this value will trigger a rollout for all existing NodePools in the cluster.
+	// +optional
+	AdditionalTrustBundle *corev1.LocalObjectReference `json:"additionalTrustBundle,omitempty"`
+
+	// secretEncryption specifies a Kubernetes secret encryption strategy for the
+	// control plane.
+	//
+	// +optional
+	SecretEncryption *SecretEncryptionSpec `json:"secretEncryption,omitempty"`
+
+	// fips indicates whether this cluster's nodes will be running in FIPS mode.
+	// If set to true, the control plane's ignition server will be configured to
+	// expect that nodes joining the cluster will be FIPS-enabled.
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="fips is immutable"
+	// +optional
+	// +immutable
+	FIPS bool `json:"fips"`
+
+	// pausedUntil is a field that can be used to pause reconciliation on the HostedCluster controller, resulting in any change to the HostedCluster being ignored.
+	// Either a date can be provided in RFC3339 format or a boolean as in 'true', 'false', 'True', 'False'. If a date is
+	// provided: reconciliation is paused on the resource until that date. If the boolean true is
+	// provided: reconciliation is paused on the resource until the field is removed.
+	// +kubebuilder:validation:MaxLength=35
+	// +kubebuilder:validation:MinLength=4
+	// +kubebuilder:validation:XValidation:rule=`self.matches('^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.*$') || self in ['true', 'false', 'True', 'False']`,message="PausedUntil must be a date in RFC3339 format or 'True', 'true', 'False' or 'false'"
+	// +optional
+	PausedUntil *string `json:"pausedUntil,omitempty"`
+
+	// OLMCatalogPlacement specifies the placement of OLM catalog components. By default,
+	// this is set to management and OLM catalog components are deployed onto the management
+	// cluster. If set to guest, the OLM catalog components will be deployed onto the guest
+	// cluster.
+	//
+	// +kubebuilder:default=management
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="OLMCatalogPlacement is immutable"
+	// +optional
+	// +immutable
+	OLMCatalogPlacement OLMCatalogPlacement `json:"olmCatalogPlacement,omitempty"`
+
+	// NodeSelector, when specified, is propagated to all control plane Deployments and StatefulSets running management side.
+	// It must be satisfied by the management Nodes for the pods to be scheduled. Otherwise, the HostedCluster will enter a degraded state.
+	// Changes to this field will propagate to existing Deployments and StatefulSets.
+	// +kubebuilder:validation:XValidation:rule="size(self) <= 20",message="nodeSelector map can have at most 20 entries"
+	// TODO(alberto): add additional validation for the map key/values.
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+	// Tolerations, when specified, define which custom tolerations are added to the HCP pods.
+	//
+	// +optional
+	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+}
+
+// OLMCatalogPlacement is an enum specifying the placement of OLM catalog components.
+// +kubebuilder:validation:Enum=management;guest
+type OLMCatalogPlacement string
+
+const (
+	// ManagementOLMCatalogPlacement indicates OLM catalog components will be placed in
+	// the management cluster.
+	ManagementOLMCatalogPlacement OLMCatalogPlacement = "management"
+
+	// GuestOLMCatalogPlacement indicates OLM catalog components will be placed in
+	// the guest cluster.
+	GuestOLMCatalogPlacement OLMCatalogPlacement = "guest"
+)
+
+func (olm *OLMCatalogPlacement) String() string {
+	return string(*olm)
+}
+
+func (olm *OLMCatalogPlacement) Set(s string) error {
+	switch strings.ToLower(s) {
+	case "guest":
+		*olm = GuestOLMCatalogPlacement
+	case "management":
+		*olm = ManagementOLMCatalogPlacement
+	default:
+		return fmt.Errorf("unknown OLMCatalogPlacement type used '%s'", s)
+	}
+	return nil
+}
+
+func (olm *OLMCatalogPlacement) Type() string {
+	return "OLMCatalogPlacement"
+}
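// Editor's note: an illustrative sketch, not part of the patch. The
// String/Set/Type trio above matches the contract of flag libraries such as
// github.com/spf13/pflag's Value interface, so the type can be bound straight
// to a command-line flag. The flag name and helper are hypothetical, and the
// sketch assumes pflag is available as a dependency.
//
//	func bindPlacementFlag(fs *pflag.FlagSet) *OLMCatalogPlacement {
//		placement := ManagementOLMCatalogPlacement
//		fs.Var(&placement, "olm-catalog-placement",
//			"placement of OLM catalog components: management or guest")
//		return &placement
//	}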
+
+// ImageContentSource specifies image mirrors that can be used by cluster nodes
+// to pull content. For cluster workloads, if a container image registry host of
+// the pullspec matches Source, then one of the Mirrors is substituted as the host
+// in the pullspec and the mirrors are tried in order to fetch the image.
+type ImageContentSource struct {
+	// Source is the repository that users refer to, e.g. in image pull
+	// specifications.
+	//
+	// +immutable
+	Source string `json:"source"`
+
+	// Mirrors are one or more repositories that may also contain the same images.
+	//
+	// +optional
+	// +immutable
+	Mirrors []string `json:"mirrors,omitempty"`
+}
+
+// ServicePublishingStrategyMapping specifies how individual control plane service endpoints are published for consumption.
+// This includes APIServer;OAuthServer;Konnectivity;Ignition.
+// If a given service is not present in this list, it will be exposed publicly by default.
+type ServicePublishingStrategyMapping struct {
+	// service identifies the type of service being published.
+	// It can be APIServer;OAuthServer;Konnectivity;Ignition
+	// OVNSbDb;OIDC are no-ops and kept for backward compatibility.
+	// This field is immutable.
+	//
+	// +kubebuilder:validation:Enum=APIServer;OAuthServer;OIDC;Konnectivity;Ignition;OVNSbDb
+	// +immutable
+	// +required
+	Service ServiceType `json:"service"`
+
+	// servicePublishingStrategy specifies how to publish a service endpoint.
+	// +required
+	ServicePublishingStrategy `json:"servicePublishingStrategy"`
+}
+
+// ServicePublishingStrategy specifies how to publish a ServiceType.
+// +kubebuilder:validation:XValidation:rule="self.type == 'NodePort' ? has(self.nodePort) : !has(self.nodePort)",message="nodePort is required when type is NodePort, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="self.type == 'Route' ? !has(self.nodePort) && !has(self.loadBalancer) : !has(self.route)",message="only route is allowed when type is Route, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="self.type == 'LoadBalancer' ? !has(self.nodePort) && !has(self.route) : !has(self.loadBalancer)",message="only loadBalancer is allowed when type is LoadBalancer, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="self.type == 'None' ? !has(self.nodePort) && !has(self.route) && !has(self.loadBalancer) : true",message="None does not allow any configuration for loadBalancer, nodePort, or route"
+// +kubebuilder:validation:XValidation:rule="self.type == 'S3' ? !has(self.nodePort) && !has(self.route) && !has(self.loadBalancer) : true",message="S3 does not allow any configuration for loadBalancer, nodePort, or route"
+type ServicePublishingStrategy struct {
+	// type is the publishing strategy used for the service.
+	// It can be LoadBalancer;NodePort;Route;None;S3
+	//
+	// +kubebuilder:validation:Enum=LoadBalancer;NodePort;Route;None;S3
+	// +required
+	Type PublishingStrategyType `json:"type"`
+
+	// nodePort configures exposing a service using a NodePort.
+	// +optional
+	NodePort *NodePortPublishingStrategy `json:"nodePort,omitempty"`
+
+	// loadBalancer configures exposing a service using a dedicated LoadBalancer.
+	// +optional
+	LoadBalancer *LoadBalancerPublishingStrategy `json:"loadBalancer,omitempty"`
+
+	// route configures exposing a service using a Route through an ingress controller behind a cloud Load Balancer.
+	// The specifics of the setup are platform dependent.
+	// +optional
+	Route *RoutePublishingStrategy `json:"route,omitempty"`
+}
+
+// PublishingStrategyType defines publishing strategies for services.
+type PublishingStrategyType string
+
+var (
+	// LoadBalancer exposes a service with a LoadBalancer kube service.
+	LoadBalancer PublishingStrategyType = "LoadBalancer"
+	// NodePort exposes a service with a NodePort kube service.
+	NodePort PublishingStrategyType = "NodePort"
+	// Route exposes services with a Route + ClusterIP kube service.
+	Route PublishingStrategyType = "Route"
+	// S3 exposes a service through an S3 bucket
+	S3 PublishingStrategyType = "S3"
+	// None disables exposing the service
+	None PublishingStrategyType = "None"
+)
+
+// ServiceType defines what control plane services can be exposed from the
+// management control plane.
+type ServiceType string
+
+var (
+	// APIServer is the control plane API server.
+	APIServer ServiceType = "APIServer"
+
+	// Konnectivity is the control plane Konnectivity networking service.
+	Konnectivity ServiceType = "Konnectivity"
+
+	// OAuthServer is the control plane OAuth service.
+	OAuthServer ServiceType = "OAuthServer"
+
+	// Ignition is the control plane ignition service for nodes.
+	Ignition ServiceType = "Ignition"
+
+	// OVNSbDb is the optional control plane ovn southbound database service used by OVNKubernetes CNI.
+	// Deprecated: This service is no longer used by OVNKubernetes CNI for >= 4.14.
+	OVNSbDb ServiceType = "OVNSbDb"
+
+	// OIDC is the control plane OIDC service.
+	// Deprecated: This service is no longer used by the control plane.
+	OIDC ServiceType = "OIDC"
+)
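// Editor's note: an illustrative sketch, not part of the patch. A minimal
// Services list that satisfies the validation on HostedClusterSpec.Services
// (all four required service types present, a hostname wherever a Route
// strategy is used, and hostnames unique across entries); the hostnames are
// hypothetical.
//
//	services := []ServicePublishingStrategyMapping{
//		{Service: APIServer, ServicePublishingStrategy: ServicePublishingStrategy{
//			Type: Route, Route: &RoutePublishingStrategy{Hostname: "api.example.com"}}},
//		{Service: OAuthServer, ServicePublishingStrategy: ServicePublishingStrategy{
//			Type: Route, Route: &RoutePublishingStrategy{Hostname: "oauth.example.com"}}},
//		{Service: Konnectivity, ServicePublishingStrategy: ServicePublishingStrategy{
//			Type: Route, Route: &RoutePublishingStrategy{Hostname: "konnectivity.example.com"}}},
//		{Service: Ignition, ServicePublishingStrategy: ServicePublishingStrategy{
//			Type: Route, Route: &RoutePublishingStrategy{Hostname: "ignition.example.com"}}},
//	}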
+
+// NodePortPublishingStrategy specifies a NodePort used to expose a service.
+type NodePortPublishingStrategy struct {
+	// address is the host/ip that the NodePort service is exposed over.
+	// +kubebuilder:validation:MaxLength=253
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:XValidation:rule=`self.matches('^(([a-zA-Z0-9][-a-zA-Z0-9]*\\.)+[a-zA-Z]{2,}|localhost)$') || self.matches('^((\\d{1,3}\\.){3}\\d{1,3})$') || self.matches('^([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}$')`, message="address must be a valid hostname, IPv4, or IPv6 address"
+	// +required
+	Address string `json:"address"`
+
+	// port is the port of the NodePort service. If <=0, the port is dynamically
+	// assigned when the service is created.
+	Port int32 `json:"port,omitempty"`
+}
+
+// LoadBalancerPublishingStrategy specifies settings used to expose a service as a LoadBalancer.
+type LoadBalancerPublishingStrategy struct {
+	// hostname is the name of the DNS record that will be created pointing to the LoadBalancer and passed through to consumers of the service.
+	// If omitted, the value will be inferred from the corev1.Service LoadBalancer .status.
+	// +kubebuilder:validation:XValidation:rule=`self.matches('^(?:[a-zA-Z0-9-]+\\.)+[a-zA-Z]{2,}$')`,message="hostname must be a valid domain name (e.g., example.com)"
+	// +kubebuilder:validation:MaxLength=253
+	// +kubebuilder:validation:MinLength=1
+	// +optional
+	Hostname string `json:"hostname,omitempty"`
+}
+
+// RoutePublishingStrategy specifies options for exposing a service as a Route.
+type RoutePublishingStrategy struct {
+	// Hostname is the name of the DNS record that will be created pointing to the Route and passed through to consumers of the service.
+	// If omitted, the value will be inferred from the management ingress.Spec.Domain.
+	// +kubebuilder:validation:XValidation:rule=`self.matches('^(?:[a-zA-Z0-9-]+\\.)+[a-zA-Z]{2,}$')`,message="hostname must be a valid domain name (e.g., example.com)"
+	// +kubebuilder:validation:MaxLength=253
+	// +kubebuilder:validation:MinLength=1
+	// +optional
+	Hostname string `json:"hostname,omitempty"`
+}
+
+// DNSSpec specifies the DNS configuration for the hosted cluster ingress.
+type DNSSpec struct {
+	// baseDomain is the base domain of the hosted cluster.
+	// It will be used to configure ingress in the hosted cluster through the subdomain baseDomainPrefix.baseDomain.
+	// If baseDomainPrefix is omitted, the hostedCluster.name will be used as the subdomain.
+	// Once set, this field is immutable.
+	// When the value is the empty string "", the controller might default to a value depending on the platform.
+	// +kubebuilder:validation:XValidation:rule=`oldSelf == "" || self == oldSelf`, message="baseDomain is immutable"
+	// +kubebuilder:validation:MaxLength=253
+	// +immutable
+	// +required
+	BaseDomain string `json:"baseDomain"`
+
+	// baseDomainPrefix is the base domain prefix for the hosted cluster ingress.
+	// It will be used to configure ingress in the hosted cluster through the subdomain baseDomainPrefix.baseDomain.
+	// If baseDomainPrefix is omitted, the hostedCluster.name will be used as the subdomain.
+	// Set baseDomainPrefix to an empty string "" if you don't want any prefix at all (not even hostedCluster.name) to be prepended to baseDomain.
+	// This field is immutable.
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="baseDomainPrefix is immutable"
+	// +kubebuilder:validation:MaxLength=253
+	// +optional
+	BaseDomainPrefix *string `json:"baseDomainPrefix,omitempty"`
+
+	// publicZoneID is the Hosted Zone ID where all the DNS records that are publicly accessible to the internet exist.
+	// This field is optional and mainly leveraged in cloud environments where the DNS records for the .baseDomain are created by controllers in this zone.
+	// Once set, this value is immutable.
+	// +optional
+	// +kubebuilder:validation:XValidation:rule=`oldSelf == "" || self == oldSelf`, message="publicZoneID is immutable"
+	// +kubebuilder:validation:MaxLength=253
+	// +kubebuilder:validation:MinLength=1
+	// +immutable
+	PublicZoneID string `json:"publicZoneID,omitempty"`
+
+	// privateZoneID is the Hosted Zone ID where all the DNS records that are only available internally to the cluster exist.
+	// This field is optional and mainly leveraged in cloud environments where the DNS records for the .baseDomain are created by controllers in this zone.
+	// Once set, this value is immutable.
+	// +optional
+	// +kubebuilder:validation:XValidation:rule=`oldSelf == "" || self == oldSelf`, message="privateZoneID is immutable"
+	// +kubebuilder:validation:MaxLength=253
+	// +kubebuilder:validation:MinLength=1
+	// +immutable
+	PrivateZoneID string `json:"privateZoneID,omitempty"`
+}
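// Editor's note: an illustrative sketch, not part of the patch. With the
// DNSSpec above, the hosted cluster ingress is configured under
// <baseDomainPrefix>.<baseDomain>; the domain values are hypothetical.
//
//	prefix := "apps-prod"
//	dns := DNSSpec{
//		BaseDomain:       "example.com",
//		BaseDomainPrefix: &prefix, // ingress ends up under apps-prod.example.com
//	}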
+
+// ClusterNetworking specifies network configuration for a cluster.
+// All CIDRs must be unique. Additional validation to check for CIDR overlap and a consistent network stack is performed by the controllers.
+// Failing that validation will result in the HostedCluster being degraded and the validConfiguration condition being false.
+// TODO this is available in vanilla kube from 1.31 API servers and in OpenShift from 4.16.
+// TODO(alberto): Use the CEL cidr library for all these validations when all management clusters are >= 1.31.
+// +kubebuilder:validation:XValidation:rule="(!has(self.machineNetwork) && self.clusterNetwork.all(c, self.serviceNetwork.all(s, c.cidr != s.cidr)) || (has(self.machineNetwork) && (self.machineNetwork.all(m, self.clusterNetwork.all(c, m.cidr != c.cidr)) && self.machineNetwork.all(m, self.serviceNetwork.all(s, m.cidr != s.cidr)) && self.clusterNetwork.all(c, self.serviceNetwork.all(s, c.cidr != s.cidr)))))",message="CIDR ranges in machineNetwork, clusterNetwork, and serviceNetwork must be unique and non-overlapping"
+type ClusterNetworking struct {
+	// machineNetwork is the list of IP address pools for machines.
+	// This might be used, among other things, to generate appropriate networking security groups in some cloud providers.
+	// Currently only one entry, or two for dual stack, is supported.
+	// This field is immutable.
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="machineNetwork is immutable and cannot be modified once set."
+	// +kubebuilder:validation:MaxItems=2
+	// +kubebuilder:validation:MinItems=1
+	// +kubebuilder:validation:ListType=atomic
+	// +immutable
+	// +optional
+	MachineNetwork []MachineNetworkEntry `json:"machineNetwork,omitempty"`
+
+	// clusterNetwork is the list of IP address pools for pods.
+	// Defaults to cidr: "10.132.0.0/14".
+	// Currently only one entry is supported.
+	// This field is immutable.
+	// +immutable
+	// +optional
+	// +kubebuilder:default:={{cidr: "10.132.0.0/14"}}
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="clusterNetwork is immutable and cannot be modified once set."
+	// +kubebuilder:validation:MaxItems=2
+	// +kubebuilder:validation:MinItems=1
+	ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork,omitempty"`
+
+	// serviceNetwork is the list of IP address pools for services.
+	// Defaults to cidr: "172.31.0.0/16".
+	// Currently only one entry is supported.
+	// This field is immutable.
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="serviceNetwork is immutable and cannot be modified once set."
+	// +kubebuilder:validation:MaxItems=2
+	// +kubebuilder:validation:MinItems=1
+	// +optional
+	// +kubebuilder:default:={{cidr: "172.31.0.0/16"}}
+	ServiceNetwork []ServiceNetworkEntry `json:"serviceNetwork,omitempty"`
+
+	// networkType specifies the SDN provider used for cluster networking.
+	// Defaults to OVNKubernetes.
+	// This field is required and immutable.
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="networkType is immutable"
+	// +optional
+	// +kubebuilder:default:="OVNKubernetes"
+	// +immutable
+	NetworkType NetworkType `json:"networkType,omitempty"`
+
+	// apiServer contains advanced network settings for the API server that affect
+	// how the APIServer is exposed inside a hosted cluster node.
+	//
+	// +immutable
+	APIServer *APIServerNetworking `json:"apiServer,omitempty"`
+}
+
+// MachineNetworkEntry is a single IP address block for node IP blocks.
+type MachineNetworkEntry struct {
+	// CIDR is the IP block address pool for machines within the cluster.
+	CIDR ipnet.IPNet `json:"cidr"`
+}
+
+// ClusterNetworkEntry is a single IP address block for pod IP blocks. IP blocks
+// are allocated with size 2^HostSubnetLength.
+type ClusterNetworkEntry struct {
+	// cidr is the IP block address pool.
+	CIDR ipnet.IPNet `json:"cidr"`
+
+	// hostPrefix is the prefix size to allocate to each node from the CIDR.
+	// For example, 24 would allocate 2^(32-24)=2^8=256 addresses to each node. If this
+	// field is not used by the plugin, it can be left unset.
+	// +optional
+	HostPrefix int32 `json:"hostPrefix,omitempty"`
+}
+
+// ServiceNetworkEntry is a single IP address block for the service network.
+type ServiceNetworkEntry struct {
+	// cidr is the IP block address pool for services within the cluster in CIDR format (e.g., 192.168.1.0/24 or 2001:0db8::/64)
+	CIDR ipnet.IPNet `json:"cidr"`
+}
+
+// +kubebuilder:validation:Pattern:=`^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$`
+type CIDRBlock string
+
+// APIServerNetworking specifies how the APIServer is exposed inside a cluster
+// node.
+type APIServerNetworking struct {
+	// advertiseAddress is the address that pods within the nodes will use to talk to the API
+	// server. This is an address associated with the loopback adapter of each
+	// node. If not specified, the controller will use default values
+	// (172.20.0.1 or fd00::1).
+	// This value is immutable.
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="advertiseAddress is immutable"
+	// +optional
+	AdvertiseAddress *string `json:"advertiseAddress,omitempty"`
+
+	// port is the port at which the APIServer is exposed inside a node. Other
+	// pods using host networking cannot listen on this port.
+	// If omitted, 6443 is used.
+	// This is useful to choose a port other than the default one, which might interfere with customer environments, e.g. https://github.com/openshift/hypershift/pull/356.
+	// Setting this to 443 is possible only for backward compatibility reasons and it's discouraged.
+	// Doing so would result in the controller overriding the KAS endpoint in the guest cluster, creating a discrepancy with the KAS Pod and potentially causing temporary network failures.
+	// This value is immutable.
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="port is immutable"
+	// +optional
+	Port *int32 `json:"port,omitempty"`
+
+	// allowedCIDRBlocks is an allow list of CIDR blocks that can access the APIServer.
+	// If not specified, traffic is allowed from all addresses.
+	// This depends on underlying support by the cloud provider for Service LoadBalancerSourceRanges.
+	AllowedCIDRBlocks []CIDRBlock `json:"allowedCIDRBlocks,omitempty"`
+}
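// Editor's note: an illustrative sketch, not part of the patch. An
// APIServerNetworking that keeps the default port but restricts client
// traffic to one range; the CIDR is a documentation range, and support
// depends on the provider honoring LoadBalancerSourceRanges, as noted above.
//
//	port := int32(6443)
//	apiServer := &APIServerNetworking{
//		Port:              &port,
//		AllowedCIDRBlocks: []CIDRBlock{"203.0.113.0/24"},
//	}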
+//
+// +kubebuilder:validation:Enum=OpenShiftSDN;Calico;OVNKubernetes;Other
+type NetworkType string
+
+const (
+	// OpenShiftSDN specifies OpenShiftSDN as the SDN provider
+	OpenShiftSDN NetworkType = "OpenShiftSDN"
+
+	// Calico specifies Calico as the SDN provider
+	Calico NetworkType = "Calico"
+
+	// OVNKubernetes specifies OVN as the SDN provider
+	OVNKubernetes NetworkType = "OVNKubernetes"
+
+	// Other specifies an undefined SDN provider
+	Other NetworkType = "Other"
+)
+
+// PlatformType is a specific supported infrastructure provider.
+type PlatformType string
+
+const (
+	// AWSPlatform represents Amazon Web Services infrastructure.
+	AWSPlatform PlatformType = "AWS"
+
+	// NonePlatform represents user supplied (e.g. bare metal) infrastructure.
+	NonePlatform PlatformType = "None"
+
+	// IBMCloudPlatform represents IBM Cloud infrastructure.
+	IBMCloudPlatform PlatformType = "IBMCloud"
+
+	// AgentPlatform represents user supplied infrastructure booted with agents.
+	AgentPlatform PlatformType = "Agent"
+
+	// KubevirtPlatform represents Kubevirt infrastructure.
+	KubevirtPlatform PlatformType = "KubeVirt"
+
+	// AzurePlatform represents Azure infrastructure.
+	AzurePlatform PlatformType = "Azure"
+
+	// PowerVSPlatform represents PowerVS infrastructure.
+	PowerVSPlatform PlatformType = "PowerVS"
+
+	// OpenStackPlatform represents OpenStack infrastructure.
+	OpenStackPlatform PlatformType = "OpenStack"
+)
+
+// PlatformTypes returns the list of all PlatformType instances.
+func PlatformTypes() []PlatformType {
+	return []PlatformType{
+		AWSPlatform,
+		NonePlatform,
+		IBMCloudPlatform,
+		AgentPlatform,
+		KubevirtPlatform,
+		AzurePlatform,
+		PowerVSPlatform,
+		OpenStackPlatform,
+	}
+}
+
+// PlatformSpec specifies the underlying infrastructure provider for the cluster
+// and is used to configure platform specific behavior.
+type PlatformSpec struct {
+	// Type is the type of infrastructure provider for the cluster.
+	//
+	// +unionDiscriminator
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Type is immutable"
+	// +immutable
+	// +openshift:validation:FeatureGateAwareEnum:featureGate="",enum=AWS;Azure;IBMCloud;KubeVirt;Agent;PowerVS;None
+	// +openshift:validation:FeatureGateAwareEnum:featureGate=OpenStack,enum=AWS;Azure;IBMCloud;KubeVirt;Agent;PowerVS;None;OpenStack
+	Type PlatformType `json:"type"`
+
+	// AWS specifies configuration for clusters running on Amazon Web Services.
+	//
+	// +optional
+	// +immutable
+	AWS *AWSPlatformSpec `json:"aws,omitempty"`
+
+	// Agent specifies configuration for agent-based installations.
+	//
+	// +optional
+	// +immutable
+	Agent *AgentPlatformSpec `json:"agent,omitempty"`
+
+	// IBMCloud defines IBMCloud specific settings for components
+	IBMCloud *IBMCloudPlatformSpec `json:"ibmcloud,omitempty"`
+
+	// Azure defines Azure specific settings
+	Azure *AzurePlatformSpec `json:"azure,omitempty"`
+
+	// PowerVS specifies configuration for clusters running on IBMCloud Power VS Service.
+	// This field is immutable. Once set, it can't be changed.
+	//
+	// +optional
+	// +immutable
+	PowerVS *PowerVSPlatformSpec `json:"powervs,omitempty"`
+
+	// KubeVirt defines KubeVirt specific settings for cluster components.
+	//
+	// +optional
+	// +immutable
+	Kubevirt *KubevirtPlatformSpec `json:"kubevirt,omitempty"`
+
+	// OpenStack specifies configuration for clusters running on OpenStack.
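+	//
+	// As an illustrative sketch, the discriminator and the matching platform
+	// block are set together, e.g.:
+	//
+	//	platform := PlatformSpec{Type: OpenStackPlatform, OpenStack: &OpenStackPlatformSpec{}}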
+	// +optional
+	// +openshift:enable:FeatureGate=OpenStack
+	OpenStack *OpenStackPlatformSpec `json:"openstack,omitempty"`
+}
+
+// IBMCloudPlatformSpec defines IBMCloud specific settings for components
+type IBMCloudPlatformSpec struct {
+	// ProviderType is a specific supported infrastructure provider within IBM Cloud.
+	ProviderType configv1.IBMCloudProviderType `json:"providerType,omitempty"`
+}
+
+// Release represents the metadata for an OCP release payload image.
+type Release struct {
+	// Image is the image pullspec of an OCP release payload image.
+	// See https://quay.io/repository/openshift-release-dev/ocp-release?tab=tags for a list of available images.
+	// +kubebuilder:validation:XValidation:rule=`self.matches('^(\\w+\\S+)$')`,message="Image must start with a word character (letters, digits, or underscores) and contain no white spaces"
+	// +kubebuilder:validation:MaxLength=253
+	// +kubebuilder:validation:MinLength=1
+	// +required
+	Image string `json:"image"`
+}
+
+// ClusterAutoscaling specifies auto-scaling behavior that applies to all
+// NodePools associated with a control plane.
+type ClusterAutoscaling struct {
+	// maxNodesTotal is the maximum allowable number of nodes for the autoscaler to scale out to.
+	// The autoscaler will not grow the cluster beyond this number.
+	// If omitted, the autoscaler will not have a maximum limit.
+	//
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	MaxNodesTotal *int32 `json:"maxNodesTotal,omitempty"`
+
+	// maxPodGracePeriod is the maximum seconds to wait for graceful pod
+	// termination before scaling down a NodePool. The default is 600 seconds.
+	//
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	MaxPodGracePeriod *int32 `json:"maxPodGracePeriod,omitempty"`
+
+	// maxNodeProvisionTime is the maximum time to wait for node provisioning
+	// before considering the provisioning to be unsuccessful, expressed as a Go
+	// duration string. The default is 15 minutes.
+	//
+	// +kubebuilder:validation:Pattern=^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$
+	// +optional
+	MaxNodeProvisionTime string `json:"maxNodeProvisionTime,omitempty"`
+
+	// podPriorityThreshold enables users to schedule "best-effort" pods, which
+	// shouldn't trigger autoscaler actions, but only run when there are spare
+	// resources available. The default is -10.
+	//
+	// See the following for more details:
+	// https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-does-cluster-autoscaler-work-with-pod-priority-and-preemption
+	//
+	// +optional
+	PodPriorityThreshold *int32 `json:"podPriorityThreshold,omitempty"`
+}
+
+// EtcdManagementType is an enum specifying the strategy for managing the cluster's etcd instance
+// +kubebuilder:validation:Enum=Managed;Unmanaged
+type EtcdManagementType string
+
+const (
+	// Managed means HyperShift should provision and operate the etcd cluster
+	// automatically.
+	Managed EtcdManagementType = "Managed"
+
+	// Unmanaged means HyperShift will not provision or manage the etcd cluster,
+	// and the user is responsible for doing so.
+	Unmanaged EtcdManagementType = "Unmanaged"
+)
+
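+// As an illustrative sketch (field values are examples only, not defaults), a
+// managed etcd configuration satisfying the validation rules on EtcdSpec below
+// would look like:
+//
+//	etcd := EtcdSpec{
+//		ManagementType: Managed,
+//		Managed: &ManagedEtcdSpec{
+//			Storage: ManagedEtcdStorageSpec{
+//				Type:             PersistentVolumeEtcdStorage,
+//				PersistentVolume: &PersistentVolumeEtcdStorageSpec{},
+//			},
+//		},
+//	}
+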
+// EtcdSpec specifies configuration for a control plane etcd cluster.
+// +kubebuilder:validation:XValidation:rule="self.managementType == 'Managed' ? has(self.managed) : !has(self.managed)",message="Only managed configuration must be set when managementType is Managed"
+// +kubebuilder:validation:XValidation:rule="self.managementType == 'Unmanaged' ? has(self.unmanaged) : !has(self.unmanaged)",message="Only unmanaged configuration must be set when managementType is Unmanaged"
+type EtcdSpec struct {
+	// managementType defines how the etcd cluster is managed.
+	// This can be either Managed or Unmanaged.
+	// This field is immutable.
+	// +unionDiscriminator
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="managementType is immutable"
+	// +required
+	// +immutable
+	ManagementType EtcdManagementType `json:"managementType"`
+
+	// managed specifies the behavior of an etcd cluster managed by HyperShift.
+	//
+	// +optional
+	// +immutable
+	Managed *ManagedEtcdSpec `json:"managed,omitempty"`
+
+	// unmanaged specifies configuration which enables the control plane to
+	// integrate with an externally managed etcd cluster.
+	//
+	// +optional
+	// +immutable
+	Unmanaged *UnmanagedEtcdSpec `json:"unmanaged,omitempty"`
+}
+
+// ManagedEtcdSpec specifies the behavior of an etcd cluster managed by
+// HyperShift.
+type ManagedEtcdSpec struct {
+	// storage specifies how etcd data is persisted.
+	//+required
+	Storage ManagedEtcdStorageSpec `json:"storage"`
+}
+
+// ManagedEtcdStorageType is a storage type for an etcd cluster.
+//
+// +kubebuilder:validation:Enum=PersistentVolume
+type ManagedEtcdStorageType string
+
+const (
+	// PersistentVolumeEtcdStorage uses PersistentVolumes for etcd storage.
+	PersistentVolumeEtcdStorage ManagedEtcdStorageType = "PersistentVolume"
+)
+
+var (
+	DefaultPersistentVolumeEtcdStorageSize resource.Quantity = resource.MustParse("8Gi")
+)
+
+// ManagedEtcdStorageSpec describes the storage configuration for etcd data.
+type ManagedEtcdStorageSpec struct {
+	// type is the kind of persistent storage implementation to use for etcd.
+	// Only PersistentVolume is supported at the moment.
+	// +immutable
+	// +required
+	// +unionDiscriminator
+	Type ManagedEtcdStorageType `json:"type"`
+
+	// persistentVolume is the configuration for PersistentVolume etcd storage.
+	// With this implementation, a PersistentVolume will be allocated for every
+	// etcd member (either 1 or 3 depending on the HostedCluster control plane
+	// availability configuration).
+	//
+	// +optional
+	PersistentVolume *PersistentVolumeEtcdStorageSpec `json:"persistentVolume,omitempty"`
+
+	// restoreSnapshotURL allows an optional URL to be provided where
+	// an etcd snapshot can be downloaded, for example a pre-signed URL
+	// referencing a storage service.
+	// This snapshot will be restored on initial startup, only when the etcd PV
+	// is empty.
+	//
+	// +optional
+	// +immutable
+	// +kubebuilder:validation:XValidation:rule="self.size() <= 1", message="RestoreSnapshotURL shouldn't contain more than 1 entry"
+	RestoreSnapshotURL []string `json:"restoreSnapshotURL,omitempty"`
+}
+
+// PersistentVolumeEtcdStorageSpec is the configuration for PersistentVolume
+// etcd storage.
+type PersistentVolumeEtcdStorageSpec struct {
+	// storageClassName is the StorageClass of the data volume for each etcd member.
+	// See https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1.
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="storageClassName is immutable"
+	// +optional
+	// +immutable
+	// TODO(alberto): This shouldn't really be a pointer. There's no real semantic difference between nil and the empty string. Revisit all pointer vs non-pointer choices.
+	StorageClassName *string `json:"storageClassName,omitempty"`
+
+	// size is the minimum size of the data volume for each etcd member.
+	// Default is 8Gi.
+	// This field is immutable.
+	// +optional
+	// +kubebuilder:default="8Gi"
+	// +immutable
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Etcd PV storage size is immutable"
+	Size *resource.Quantity `json:"size,omitempty"`
+}
+
+// UnmanagedEtcdSpec specifies configuration which enables the control plane to
+// integrate with an externally managed etcd cluster.
+type UnmanagedEtcdSpec struct {
+	// endpoint is the full etcd cluster client endpoint URL. For example:
+	//
+	//     https://etcd-client:2379
+	//
+	// If the URL uses an HTTPS scheme, the TLS field is required.
+	//
+	// +kubebuilder:validation:Pattern=`^https://`
+	Endpoint string `json:"endpoint"`
+
+	// tls specifies TLS configuration for HTTPS etcd client endpoints.
+	//+required
+	TLS EtcdTLSConfig `json:"tls"`
+}
+
+// EtcdTLSConfig specifies TLS configuration for HTTPS etcd client endpoints.
+type EtcdTLSConfig struct {
+	// ClientSecret refers to a secret for client mTLS authentication with the etcd cluster. It
+	// may have the following key/value pairs:
+	//
+	//     etcd-client-ca.crt: Certificate Authority value
+	//     etcd-client.crt: Client certificate value
+	//     etcd-client.key: Client certificate key value
+	ClientSecret corev1.LocalObjectReference `json:"clientSecret"`
+}
+
+// SecretEncryptionType defines the type of kube secret encryption being used.
+// +kubebuilder:validation:Enum=kms;aescbc
+type SecretEncryptionType string
+
+const (
+	// KMS integrates with a cloud provider's key management service to do secret encryption
+	KMS SecretEncryptionType = "kms"
+	// AESCBC uses AES-CBC with PKCS#7 padding to do secret encryption
+	AESCBC SecretEncryptionType = "aescbc"
+)
+
+// SecretEncryptionSpec contains metadata about the kubernetes secret encryption strategy being used for the
+// cluster when applicable.
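+//
+// As an illustrative sketch (the secret name is an example only), an AESCBC
+// configuration with an active key would be:
+//
+//	enc := SecretEncryptionSpec{
+//		Type:   AESCBC,
+//		AESCBC: &AESCBCSpec{ActiveKey: corev1.LocalObjectReference{Name: "encryption-key"}},
+//	}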
+type SecretEncryptionSpec struct {
+	// Type defines the type of kube secret encryption being used
+	// +unionDiscriminator
+	Type SecretEncryptionType `json:"type"`
+
+	// KMS defines metadata about the kms secret encryption strategy
+	// +optional
+	KMS *KMSSpec `json:"kms,omitempty"`
+
+	// AESCBC defines metadata about the AESCBC secret encryption strategy
+	// +optional
+	AESCBC *AESCBCSpec `json:"aescbc,omitempty"`
+}
+
+// KMSProvider defines the supported KMS providers
+// +kubebuilder:validation:Enum=IBMCloud;AWS;Azure
+type KMSProvider string
+
+const (
+	IBMCloud KMSProvider = "IBMCloud"
+	AWS      KMSProvider = "AWS"
+	AZURE    KMSProvider = "Azure"
+)
+
+// KMSSpec defines metadata about the kms secret encryption strategy
+type KMSSpec struct {
+	// Provider defines the KMS provider
+	// +unionDiscriminator
+	Provider KMSProvider `json:"provider"`
+	// IBMCloud defines metadata for the IBM Cloud KMS encryption strategy
+	// +optional
+	IBMCloud *IBMCloudKMSSpec `json:"ibmcloud,omitempty"`
+	// AWS defines metadata about the configuration of the AWS KMS Secret Encryption provider
+	// +optional
+	AWS *AWSKMSSpec `json:"aws,omitempty"`
+	// Azure defines metadata about the configuration of the Azure KMS Secret Encryption provider using Azure key vault
+	// +optional
+	Azure *AzureKMSSpec `json:"azure,omitempty"`
+}
+
+// AESCBCSpec defines metadata about the AESCBC secret encryption strategy
+type AESCBCSpec struct {
+	// ActiveKey defines the active key used to encrypt new secrets
+	ActiveKey corev1.LocalObjectReference `json:"activeKey"`
+	// BackupKey defines the old key during the rotation process so previously created
+	// secrets can continue to be decrypted until they are all re-encrypted with the active key.
+	// +optional
+	BackupKey *corev1.LocalObjectReference `json:"backupKey,omitempty"`
+}
+
+type PayloadArchType string
+
+const (
+	AMD64   PayloadArchType = "AMD64"
+	PPC64LE PayloadArchType = "PPC64LE"
+	S390X   PayloadArchType = "S390X"
+	ARM64   PayloadArchType = "ARM64"
+	Multi   PayloadArchType = "Multi"
+)
+
+// ToPayloadArch converts an architecture string to a PayloadArchType.
+func ToPayloadArch(arch string) PayloadArchType {
+	switch arch {
+	case "amd64", string(AMD64):
+		return AMD64
+	case "arm64", string(ARM64):
+		return ARM64
+	case "ppc64le", string(PPC64LE):
+		return PPC64LE
+	case "s390x", string(S390X):
+		return S390X
+	case "multi", string(Multi):
+		return Multi
+	default:
+		return ""
+	}
+}
+
+// HostedClusterStatus is the latest observed status of a HostedCluster.
+type HostedClusterStatus struct {
+	// Version is the status of the release version applied to the
+	// HostedCluster.
+	// +optional
+	Version *ClusterVersionStatus `json:"version,omitempty"`
+
+	// KubeConfig is a reference to the secret containing the default kubeconfig
+	// for the cluster.
+	// +optional
+	KubeConfig *corev1.LocalObjectReference `json:"kubeconfig,omitempty"`
+
+	// KubeadminPassword is a reference to the secret that contains the initial
+	// kubeadmin user password for the guest cluster.
+	// +optional
+	KubeadminPassword *corev1.LocalObjectReference `json:"kubeadminPassword,omitempty"`
+
+	// IgnitionEndpoint is the endpoint injected in the ignition config userdata.
+	// It exposes the config for instances to become kubernetes nodes.
+	// +optional
+	IgnitionEndpoint string `json:"ignitionEndpoint,omitempty"`
+
+	// ControlPlaneEndpoint contains the endpoint information by which
+	// external clients can access the control plane. This is populated
+	// after the infrastructure is ready.
+ // +kubebuilder:validation:Optional + ControlPlaneEndpoint APIEndpoint `json:"controlPlaneEndpoint,omitempty"` + + // OAuthCallbackURLTemplate contains a template for the URL to use as a callback + // for identity providers. The [identity-provider-name] placeholder must be replaced + // with the name of an identity provider defined on the HostedCluster. + // This is populated after the infrastructure is ready. + // +kubebuilder:validation:Optional + OAuthCallbackURLTemplate string `json:"oauthCallbackURLTemplate,omitempty"` + + // Conditions represents the latest available observations of a control + // plane's current state. + // +optional + // +listType=map + // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // payloadArch represents the CPU architecture type of the HostedCluster.Spec.Release.Image. The valid values are: + // Multi, ARM64, AMD64, S390X, or PPC64LE. + // +kubebuilder:validation:Enum=Multi;ARM64;AMD64;PPC64LE;S390X + // +optional + PayloadArch PayloadArchType `json:"payloadArch,omitempty"` + + // Platform contains platform-specific status of the HostedCluster + // +optional + Platform *PlatformStatus `json:"platform,omitempty"` +} + +// PlatformStatus contains platform-specific status +type PlatformStatus struct { + // +optional + AWS *AWSPlatformStatus `json:"aws,omitempty"` +} + +// ClusterVersionStatus reports the status of the cluster versioning, +// including any upgrades that are in progress. The current field will +// be set to whichever version the cluster is reconciling to, and the +// conditions array will report whether the update succeeded, is in +// progress, or is failing. +// +k8s:deepcopy-gen=true +type ClusterVersionStatus struct { + // desired is the version that the cluster is reconciling towards. + // If the cluster is not yet fully initialized desired will be set + // with the information available, which may be an image or a tag. + Desired configv1.Release `json:"desired"` + + // history contains a list of the most recent versions applied to the cluster. + // This value may be empty during cluster startup, and then will be updated + // when a new update is being applied. The newest update is first in the + // list and it is ordered by recency. Updates in the history have state + // Completed if the rollout completed - if an update was failing or halfway + // applied the state will be Partial. Only a limited amount of update history + // is preserved. + // + // +optional + History []configv1.UpdateHistory `json:"history,omitempty"` + + // observedGeneration reports which version of the spec is being synced. + // If this value is not equal to metadata.generation, then the desired + // and conditions fields may represent a previous version. + ObservedGeneration int64 `json:"observedGeneration"` + + // availableUpdates contains updates recommended for this + // cluster. Updates which appear in conditionalUpdates but not in + // availableUpdates may expose this cluster to known issues. This list + // may be empty if no updates are recommended, if the update service + // is unavailable, or if an invalid channel has been specified. + // +nullable + // +kubebuilder:validation:Required + // +required + AvailableUpdates []configv1.Release `json:"availableUpdates"` + + // conditionalUpdates contains the list of updates that may be + // recommended for this cluster if it meets specific required + // conditions. 
Consumers interested in the set of updates that are + // actually recommended for this cluster should use + // availableUpdates. This list may be empty if no updates are + // recommended, if the update service is unavailable, or if an empty + // or invalid channel has been specified. + // +listType=atomic + // +optional + ConditionalUpdates []configv1.ConditionalUpdate `json:"conditionalUpdates,omitempty"` +} + +// ClusterConfiguration specifies configuration for individual OCP components in the +// cluster, represented as embedded resources that correspond to the openshift +// configuration API. +// +// The API for individual configuration items is at: +// https://docs.openshift.com/container-platform/4.7/rest_api/config_apis/config-apis-index.html +type ClusterConfiguration struct { + // APIServer holds configuration (like serving certificates, client CA and CORS domains) + // shared by all API servers in the system, among them especially kube-apiserver + // and openshift-apiserver. + // +optional + APIServer *configv1.APIServerSpec `json:"apiServer,omitempty"` + + // Authentication specifies cluster-wide settings for authentication (like OAuth and + // webhook token authenticators). + // +optional + Authentication *configv1.AuthenticationSpec `json:"authentication,omitempty"` + + // FeatureGate holds cluster-wide information about feature gates. + // +optional + FeatureGate *configv1.FeatureGateSpec `json:"featureGate,omitempty"` + + // Image governs policies related to imagestream imports and runtime configuration + // for external registries. It allows cluster admins to configure which registries + // OpenShift is allowed to import images from, extra CA trust bundles for external + // registries, and policies to block or allow registry hostnames. + // When exposing OpenShift's image registry to the public, this also lets cluster + // admins specify the external hostname. + // Changing this value will trigger a rollout for all existing NodePools in the cluster. + // TODO(alberto): elaborate why. + // +rollout + // +optional + Image *configv1.ImageSpec `json:"image,omitempty"` + + // Ingress holds cluster-wide information about ingress, including the default ingress domain + // used for routes. + // +optional + Ingress *configv1.IngressSpec `json:"ingress,omitempty"` + + // Network holds cluster-wide information about the network. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. + // Please view network.spec for an explanation on what applies when configuring this resource. + // TODO (csrwng): Add validation here to exclude changes that conflict with networking settings in the HostedCluster.Spec.Networking field. + // +optional + Network *configv1.NetworkSpec `json:"network,omitempty"` + + // OAuth holds cluster-wide information about OAuth. + // It is used to configure the integrated OAuth server. + // This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth. + // +optional + // +kubebuilder:validation:XValidation:rule="!has(self.tokenConfig) || !has(self.tokenConfig.accessTokenInactivityTimeout) || duration(self.tokenConfig.accessTokenInactivityTimeout).getSeconds() >= 300", message="spec.configuration.oauth.tokenConfig.accessTokenInactivityTimeout minimum acceptable token timeout value is 300 seconds" + OAuth *configv1.OAuthSpec `json:"oauth,omitempty"` + + // OperatorHub specifies the configuration for the Operator Lifecycle Manager in the HostedCluster. 
The OperatorHub configuration is constantly reconciled when catalog placement is management, but is only applied at cluster creation time otherwise.
+	//
+	// +optional
+	OperatorHub *configv1.OperatorHubSpec `json:"operatorhub,omitempty"`
+
+	// Scheduler holds cluster-wide config information to run the Kubernetes Scheduler
+	// and influence its placement decisions. The canonical name for this config is `cluster`.
+	// +optional
+	Scheduler *configv1.SchedulerSpec `json:"scheduler,omitempty"`
+
+	// Proxy holds cluster-wide information on how to configure default proxies for the cluster.
+	// This affects traffic flowing from the hosted cluster data plane.
+	// The controllers will generate a machineConfig with the proxy config for the cluster.
+	// This MachineConfig will be part of every payload generated by the controllers for any NodePool of the HostedCluster.
+	// Changing this value will trigger a rollout for all existing NodePools in the cluster.
+	// +rollout
+	// +optional
+	Proxy *configv1.ProxySpec `json:"proxy,omitempty"`
+}
+
+// +genclient
+
+// HostedCluster is the primary representation of a HyperShift cluster and encapsulates
+// the control plane and common data plane configuration. Creating a HostedCluster
+// results in a fully functional OpenShift control plane with no attached nodes.
+// To support workloads (e.g. pods), a HostedCluster may have one or more associated
+// NodePool resources.
+//
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=hostedclusters,shortName=hc;hcs,scope=Namespaced
+// +kubebuilder:storageversion
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".status.version.history[?(@.state==\"Completed\")].version",description="Version"
+// +kubebuilder:printcolumn:name="KubeConfig",type="string",JSONPath=".status.kubeconfig.name",description="KubeConfig Secret"
+// +kubebuilder:printcolumn:name="Progress",type="string",JSONPath=".status.version.history[?(@.state!=\"\")].state",description="Progress"
+// +kubebuilder:printcolumn:name="Available",type="string",JSONPath=".status.conditions[?(@.type==\"Available\")].status",description="Available"
+// +kubebuilder:printcolumn:name="Progressing",type="string",JSONPath=".status.conditions[?(@.type==\"Progressing\")].status",description="Progressing"
+// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.conditions[?(@.type==\"Available\")].message",description="Message"
+type HostedCluster struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// Spec is the desired behavior of the HostedCluster.
+	Spec HostedClusterSpec `json:"spec,omitempty"`
+
+	// Status is the latest observed status of the HostedCluster.
+	Status HostedClusterStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// HostedClusterList contains a list of HostedClusters.
+type HostedClusterList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []HostedCluster `json:"items"`
+}
diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/ibmcloud.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/ibmcloud.go
new file mode 100644
index 000000000..e69ffc431
--- /dev/null
+++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/ibmcloud.go
@@ -0,0 +1,68 @@
+package v1beta1
+
+import corev1 "k8s.io/api/core/v1"
+
+// IBMCloudKMSSpec defines metadata for the IBM Cloud KMS encryption strategy
+type IBMCloudKMSSpec struct {
+	// Region is the IBM Cloud region
+	Region string `json:"region"`
+	// Auth defines metadata for how authentication is done with IBM Cloud KMS
+	Auth IBMCloudKMSAuthSpec `json:"auth"`
+	// KeyList defines the list of keys used for data encryption
+	KeyList []IBMCloudKMSKeyEntry `json:"keyList"`
+}
+
+// IBMCloudKMSKeyEntry defines metadata for an IBM Cloud KMS encryption key
+type IBMCloudKMSKeyEntry struct {
+	// CRKID is the customer root key id
+	CRKID string `json:"crkID"`
+	// InstanceID is the id for the key protect instance
+	InstanceID string `json:"instanceID"`
+	// CorrelationID is an identifier used to track all API call usage from hypershift
+	CorrelationID string `json:"correlationID"`
+	// URL is the url to call key protect apis over
+	// +kubebuilder:validation:Pattern=`^https://`
+	URL string `json:"url"`
+	// KeyVersion is a unique number associated with the key. The number increments whenever a new
+	// key is enabled for data encryption.
+	KeyVersion int `json:"keyVersion"`
+}
+
+// IBMCloudKMSAuthSpec defines metadata for how authentication is done with IBM Cloud KMS
+type IBMCloudKMSAuthSpec struct {
+	// Type defines the IBM Cloud KMS authentication strategy
+	// +unionDiscriminator
+	Type IBMCloudKMSAuthType `json:"type"`
+	// Unmanaged defines the auth metadata the customer provides to interact with IBM Cloud KMS
+	// +optional
+	Unmanaged *IBMCloudKMSUnmanagedAuthSpec `json:"unmanaged,omitempty"`
+	// Managed defines metadata around the service to service authentication strategy for the IBM Cloud
+	// KMS system (all provider managed).
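+	//
+	// As an illustrative sketch, the managed variant needs no further input:
+	//
+	//	auth := IBMCloudKMSAuthSpec{Type: IBMCloudKMSManagedAuth, Managed: &IBMCloudKMSManagedAuthSpec{}}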
+	// +optional
+	Managed *IBMCloudKMSManagedAuthSpec `json:"managed,omitempty"`
+}
+
+// IBMCloudKMSAuthType defines the IBM Cloud KMS authentication strategy
+// +kubebuilder:validation:Enum=Managed;Unmanaged
+type IBMCloudKMSAuthType string
+
+const (
+	// IBMCloudKMSManagedAuth defines the KMS authentication strategy where the IKS/ROKS platform uses
+	// service to service auth to call IBM Cloud KMS APIs (no customer credentials required)
+	IBMCloudKMSManagedAuth IBMCloudKMSAuthType = "Managed"
+	// IBMCloudKMSUnmanagedAuth defines the KMS authentication strategy where a customer supplies IBM Cloud
+	// authentication to interact with IBM Cloud KMS APIs
+	IBMCloudKMSUnmanagedAuth IBMCloudKMSAuthType = "Unmanaged"
+)
+
+// IBMCloudKMSUnmanagedAuthSpec defines the auth metadata the customer provides to interact with IBM Cloud KMS
+type IBMCloudKMSUnmanagedAuthSpec struct {
+	// Credentials should reference a secret with a key field of IBMCloudIAMAPIKeySecretKey that contains an API key to
+	// call IBM Cloud KMS APIs
+	Credentials corev1.LocalObjectReference `json:"credentials"`
+}
+
+// IBMCloudKMSManagedAuthSpec defines metadata around the service to service authentication strategy for the IBM Cloud
+// KMS system (all provider managed).
+type IBMCloudKMSManagedAuthSpec struct {
+}
diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/kubevirt.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/kubevirt.go
new file mode 100644
index 000000000..340a27f3a
--- /dev/null
+++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/kubevirt.go
@@ -0,0 +1,382 @@
+package v1beta1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+)
+
+const (
+	QoSClassBurstable  QoSClass = "Burstable"
+	QoSClassGuaranteed QoSClass = "Guaranteed"
+)
+
+type QoSClass string
+
+// KubevirtCompute contains values associated with the virtual compute hardware requested for the VM.
+type KubevirtCompute struct {
+	// Memory represents how much guest memory the VM should have
+	//
+	// +optional
+	// +kubebuilder:default="8Gi"
+	Memory *resource.Quantity `json:"memory"`
+
+	// Cores represents how many cores the guest VM should have
+	//
+	// +optional
+	// +kubebuilder:default=2
+	Cores *uint32 `json:"cores"`
+
+	// QosClass, if set to "Guaranteed", requests the scheduler to place the VirtualMachineInstance on a node with
+	// memory and CPU limits equal to the requested values, giving the VMI the Guaranteed QoS class;
+	// See here for more details:
+	// https://kubevirt.io/user-guide/operations/node_overcommit/#requesting-the-right-qos-class-for-virtualmachineinstances
+	//
+	// +optional
+	// +kubebuilder:validation:Enum=Burstable;Guaranteed
+	// +kubebuilder:default=Burstable
+	QosClass *QoSClass `json:"qosClass,omitempty"`
+}
+
+// +kubebuilder:validation:Enum=ReadWriteOnce;ReadWriteMany;ReadOnly;ReadWriteOncePod
+type PersistentVolumeAccessMode corev1.PersistentVolumeAccessMode
+
+// KubevirtPersistentVolume contains the values involved with provisioning persistent storage for a KubeVirt VM.
+type KubevirtPersistentVolume struct {
+	// Size is the size of the persistent storage volume
+	//
+	// +optional
+	// +kubebuilder:default="32Gi"
+	Size *resource.Quantity `json:"size"`
+	// StorageClass is the storageClass used for the underlying PVC that hosts the volume
+	//
+	// +optional
+	StorageClass *string `json:"storageClass,omitempty"`
+	// AccessModes is an array that contains the desired Access Modes the root volume should have.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes
+	//
+	// +optional
+	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty"`
+	// VolumeMode defines what type of volume is required by the claim.
+	// Value of Filesystem is implied when not included in claim spec.
+	// +optional
+	// +kubebuilder:validation:Enum=Filesystem;Block
+	VolumeMode *corev1.PersistentVolumeMode `json:"volumeMode,omitempty"`
+}
+
+// KubevirtCachingStrategyType is the type of the boot image caching mechanism for the KubeVirt provider
+type KubevirtCachingStrategyType string
+
+const (
+	// KubevirtCachingStrategyNone means that hypershift will not cache the boot image
+	KubevirtCachingStrategyNone KubevirtCachingStrategyType = "None"
+
+	// KubevirtCachingStrategyPVC means that hypershift will cache the boot image into a PVC; only relevant when using
+	// a QCOW boot image, and is ignored when using a container image
+	KubevirtCachingStrategyPVC KubevirtCachingStrategyType = "PVC"
+)
+
+// KubevirtCachingStrategy defines the boot image caching strategy
+type KubevirtCachingStrategy struct {
+	// Type is the type of the caching strategy
+	// +kubebuilder:default=None
+	// +kubebuilder:validation:Enum=None;PVC
+	Type KubevirtCachingStrategyType `json:"type"`
+}
+
+// KubevirtRootVolume represents the volume that the rhcos disk will be stored on and run from.
+type KubevirtRootVolume struct {
+	// Image represents what rhcos image to use for the node pool
+	//
+	// +optional
+	Image *KubevirtDiskImage `json:"diskImage,omitempty"`
+
+	// KubevirtVolume represents the type of storage to run the image on
+	KubevirtVolume `json:",inline"`
+
+	// CacheStrategy defines the boot image caching strategy. Default: no caching
+	// +optional
+	CacheStrategy *KubevirtCachingStrategy `json:"cacheStrategy,omitempty"`
+}
+
+// KubevirtVolumeType is a specific supported KubeVirt volume type
+//
+// +kubebuilder:validation:Enum=Persistent
+type KubevirtVolumeType string
+
+const (
+	// KubevirtVolumeTypePersistent represents persistent volume for kubevirt VMs
+	KubevirtVolumeTypePersistent KubevirtVolumeType = "Persistent"
+)
+
+// KubevirtVolume represents what kind of storage to use for a KubeVirt VM volume
+type KubevirtVolume struct {
+	// Type represents the type of storage to associate with the kubevirt VMs.
+	//
+	// +optional
+	// +unionDiscriminator
+	// +kubebuilder:default=Persistent
+	Type KubevirtVolumeType `json:"type"`
+
+	// Persistent volume type means the VM's storage is backed by a PVC
+	// VMs that use persistent volumes can survive disruption events like restart and eviction
+	// This is the default type used when no storage type is defined.
+	//
+	// +optional
+	Persistent *KubevirtPersistentVolume `json:"persistent,omitempty"`
+}
+
+// KubevirtDiskImage contains values representing where the rhcos image is located
+type KubevirtDiskImage struct {
+	// ContainerDiskImage is a string representing the container image that holds the root disk
+	//
+	// +optional
+	ContainerDiskImage *string `json:"containerDiskImage,omitempty"`
+}
+
+type MultiQueueSetting string
+
+const (
+	MultiQueueEnable  MultiQueueSetting = "Enable"
+	MultiQueueDisable MultiQueueSetting = "Disable"
+)
+
+// KubevirtNodePoolPlatform specifies the configuration of a NodePool when operating
+// on KubeVirt platform.
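+//
+// As an illustrative sketch (the size is an example only), a persistent root
+// volume can be requested as:
+//
+//	size := resource.MustParse("64Gi")
+//	platform := KubevirtNodePoolPlatform{
+//		RootVolume: &KubevirtRootVolume{
+//			KubevirtVolume: KubevirtVolume{
+//				Type:       KubevirtVolumeTypePersistent,
+//				Persistent: &KubevirtPersistentVolume{Size: &size},
+//			},
+//		},
+//	}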
+type KubevirtNodePoolPlatform struct {
+	// RootVolume represents values associated with the VM volume that will host rhcos
+	// +kubebuilder:default={persistent: {size: "32Gi"}, type: "Persistent"}
+	RootVolume *KubevirtRootVolume `json:"rootVolume"`
+
+	// Compute contains values representing the virtual hardware requested for the VM
+	//
+	// +optional
+	// +kubebuilder:default={memory: "8Gi", cores: 2}
+	Compute *KubevirtCompute `json:"compute"`
+
+	// NetworkInterfaceMultiQueue, if set to "Enable", means virtual network interfaces configured with a virtio bus will also
+	// enable the vhost multiqueue feature for network devices. The number of queues created depends on additional
+	// factors of the VirtualMachineInstance, like the number of guest CPUs.
+	//
+	// +optional
+	// +kubebuilder:validation:Enum=Enable;Disable
+	// +kubebuilder:default=Enable
+	NetworkInterfaceMultiQueue *MultiQueueSetting `json:"networkInterfaceMultiqueue,omitempty"`
+
+	// AdditionalNetworks specifies the extra networks attached to the nodes
+	//
+	// +optional
+	AdditionalNetworks []KubevirtNetwork `json:"additionalNetworks,omitempty"`
+
+	// AttachDefaultNetwork specifies whether the default pod network should be attached to the nodes.
+	// This can only be set to false if AdditionalNetworks are configured
+	//
+	// +optional
+	// +kubebuilder:default=true
+	AttachDefaultNetwork *bool `json:"attachDefaultNetwork,omitempty"`
+
+	// NodeSelector is a selector which must be true for the kubevirt VirtualMachine to fit on a node.
+	// Selector which must match a node's labels for the VM to be scheduled on that node. More info:
+	// https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+	//
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+	// KubevirtHostDevices specifies the host devices (e.g. GPU devices) to be passed
+	// from the management cluster to the NodePool nodes
+	KubevirtHostDevices []KubevirtHostDevice `json:"hostDevices,omitempty"`
+}
+
+// KubevirtNetwork specifies the configuration for a virtual machine
+// network interface
+type KubevirtNetwork struct {
+	// Name specifies the network attached to the nodes.
+	// It is a value with the format "[namespace]/[name]" to reference the
+	// multus network attachment definition
+	Name string `json:"name"`
+}
+
+type KubevirtHostDevice struct {
+	// DeviceName is the name of the host device that is desired to be utilized in the HostedCluster's NodePool
+	// The device can be any supported PCI device, including GPU, either as a passthrough or a vGPU slice.
+	DeviceName string `json:"deviceName"`
+
+	// Count is the number of instances of the specified host device that will be attached to each of the
+	// NodePool's nodes. Default is 1.
+	//
+	// +optional
+	// +kubebuilder:default=1
+	// +kubebuilder:validation:Minimum=1
+	Count int `json:"count,omitempty"`
+}
+
+// KubeVirtNodePoolStatus contains the KubeVirt platform statuses
+type KubeVirtNodePoolStatus struct {
+	// CacheName holds the name of the cache DataVolume, if exists
+	// +optional
+	CacheName string `json:"cacheName,omitempty"`
+
+	// Credentials shows the client credentials used when creating KubeVirt virtual machines.
+	// This field only exists when the KubeVirt virtual machines are being placed
+	// on a cluster separate from the one hosting the Hosted Control Plane components.
+	//
+	// The default behavior when Credentials is not defined is for the KubeVirt VMs to be placed on
+	// the same cluster and namespace as the Hosted Control Plane.
+	// +optional
+	Credentials *KubevirtPlatformCredentials `json:"credentials,omitempty"`
+}
+type KubevirtPlatformCredentials struct {
+	// InfraKubeConfigSecret is a reference to a secret that contains the kubeconfig for the external infra cluster
+	// that will be used to host the KubeVirt virtual machines for this cluster.
+	//
+	// +immutable
+	// +kubebuilder:validation:Required
+	// +required
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="infraKubeConfigSecret is immutable"
+	InfraKubeConfigSecret *KubeconfigSecretRef `json:"infraKubeConfigSecret,omitempty"`
+
+	// InfraNamespace defines the namespace on the external infra cluster that is used to host the KubeVirt
+	// virtual machines. This namespace must already exist before creating the HostedCluster and the kubeconfig
+	// referenced in the InfraKubeConfigSecret must have access to manage the required resources within this
+	// namespace.
+	//
+	// +immutable
+	// +kubebuilder:validation:Required
+	// +required
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="infraNamespace is immutable"
+	InfraNamespace string `json:"infraNamespace"`
+}
+
+// KubevirtPlatformSpec specifies configuration for kubevirt guest cluster installations
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.generateID) || has(self.generateID)", message="Kubevirt GenerateID is required once set"
+type KubevirtPlatformSpec struct {
+	// BaseDomainPassthrough toggles whether an automatically
+	// generated base domain for the guest cluster should be used that
+	// is a subdomain of the management cluster's *.apps DNS.
+	//
+	// For the KubeVirt platform, the basedomain can be autogenerated using
+	// the *.apps domain of the management/infra hosting cluster.
+	// This makes the guest cluster's base domain a subdomain of the
+	// hypershift infra/mgmt cluster's base domain.
+	//
+	// Example:
+	//   Infra/Mgmt cluster's DNS
+	//     Base: example.com
+	//     Cluster: mgmt-cluster.example.com
+	//     Apps: *.apps.mgmt-cluster.example.com
+	//   KubeVirt Guest cluster's DNS
+	//     Base: apps.mgmt-cluster.example.com
+	//     Cluster: guest.apps.mgmt-cluster.example.com
+	//     Apps: *.apps.guest.apps.mgmt-cluster.example.com
+	//
+	// This is possible using OCP wildcard routes.
+	//
+	// +optional
+	// +immutable
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="baseDomainPassthrough is immutable"
+	BaseDomainPassthrough *bool `json:"baseDomainPassthrough,omitempty"`
+
+	// GenerateID is used to apply a unique name suffix to the kubevirt
+	// infrastructure resources associated with this cluster
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Kubevirt GenerateID is immutable once set"
+	// +kubebuilder:validation:MaxLength=11
+	// +optional
+	GenerateID string `json:"generateID,omitempty"`
+	// Credentials defines the client credentials used when creating KubeVirt virtual machines.
+	// Defining credentials is only necessary when the KubeVirt virtual machines are being placed
+	// on a cluster separate from the one hosting the Hosted Control Plane components.
+	//
+	// The default behavior when Credentials is not defined is for the KubeVirt VMs to be placed on
+	// the same cluster and namespace as the Hosted Control Plane.
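+	//
+	// As an illustrative sketch (names are examples only, and KubeconfigSecretRef
+	// is assumed to carry the secret name and key):
+	//
+	//	creds := KubevirtPlatformCredentials{
+	//		InfraKubeConfigSecret: &KubeconfigSecretRef{Name: "infra-kubeconfig", Key: "kubeconfig"},
+	//		InfraNamespace:        "guest-vms",
+	//	}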
+	// +optional
+	Credentials *KubevirtPlatformCredentials `json:"credentials,omitempty"`
+
+	// StorageDriver defines how the KubeVirt CSI driver exposes StorageClasses on
+	// the infra cluster (hosting the VMs) to the guest cluster.
+	//
+	// +kubebuilder:validation:Optional
+	// +optional
+	// +immutable
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="storageDriver is immutable"
+	StorageDriver *KubevirtStorageDriverSpec `json:"storageDriver,omitempty"`
+}
+
+// KubevirtStorageDriverConfigType defines how the kubevirt storage driver is configured.
+//
+// +kubebuilder:validation:Enum=None;Default;Manual
+type KubevirtStorageDriverConfigType string
+
+const (
+	// NoneKubevirtStorageDriverConfigType means no kubevirt storage driver is used
+	NoneKubevirtStorageDriverConfigType KubevirtStorageDriverConfigType = "None"
+
+	// DefaultKubevirtStorageDriverConfigType means the kubevirt storage driver maps to the
+	// underlying infra cluster's default storageclass
+	DefaultKubevirtStorageDriverConfigType KubevirtStorageDriverConfigType = "Default"
+
+	// ManualKubevirtStorageDriverConfigType means the kubevirt storage driver mapping is
+	// explicitly defined.
+	ManualKubevirtStorageDriverConfigType KubevirtStorageDriverConfigType = "Manual"
+)
+
+type KubevirtStorageDriverSpec struct {
+	// Type represents the type of kubevirt csi driver configuration to use
+	//
+	// +unionDiscriminator
+	// +immutable
+	// +kubebuilder:default=Default
+	// +optional
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="storageDriver.Type is immutable"
+	Type KubevirtStorageDriverConfigType `json:"type,omitempty"`
+
+	// Manual is used to explicitly define how the infra storageclasses are
+	// mapped to guest storageclasses
+	//
+	// +immutable
+	// +optional
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="storageDriver.Manual is immutable"
+	Manual *KubevirtManualStorageDriverConfig `json:"manual,omitempty"`
+}
+
+type KubevirtManualStorageDriverConfig struct {
+	// StorageClassMapping maps StorageClasses on the infra cluster hosting
+	// the KubeVirt VMs to StorageClasses that are made available within the
+	// Guest Cluster.
+	//
+	// NOTE: It is possible that not all capabilities of an infra cluster's
+	// storageclass will be present for the corresponding guest cluster's storageclass.
+	//
+	// +optional
+	// +immutable
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="storageClassMapping is immutable"
+	StorageClassMapping []KubevirtStorageClassMapping `json:"storageClassMapping,omitempty"`
+
+	// +optional
+	// +immutable
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="volumeSnapshotClassMapping is immutable"
+	VolumeSnapshotClassMapping []KubevirtVolumeSnapshotClassMapping `json:"volumeSnapshotClassMapping,omitempty"`
+}
+
+type KubevirtStorageClassMapping struct {
+	// Group contains which group this mapping belongs to.
+	Group string `json:"group,omitempty"`
+	// InfraStorageClassName is the name of the infra cluster storage class that
+	// will be exposed to the guest.
+	InfraStorageClassName string `json:"infraStorageClassName"`
+	// GuestStorageClassName is the name that the corresponding storageclass will
+	// be called within the guest cluster
+	GuestStorageClassName string `json:"guestStorageClassName"`
+}
+
+type KubevirtVolumeSnapshotClassMapping struct {
+	// Group contains which group this mapping belongs to.
+	Group string `json:"group,omitempty"`
+	// InfraVolumeSnapshotClassName is the name of the infra cluster volume snapshot class that
+	// will be exposed to the guest.
+	InfraVolumeSnapshotClassName string `json:"infraVolumeSnapshotClassName"`
+	// GuestVolumeSnapshotClassName is the name that the corresponding volumeSnapshotClass will
+	// be called within the guest cluster
+	GuestVolumeSnapshotClassName string `json:"guestVolumeSnapshotClassName"`
+}
diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/nodepool_conditions.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/nodepool_conditions.go
new file mode 100644
index 000000000..4a057d665
--- /dev/null
+++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/nodepool_conditions.go
@@ -0,0 +1,119 @@
+package v1beta1
+
+// Conditions
+const (
+	// NodePoolValidGeneratedPayloadConditionType signals if the ignition server generated an ignition payload successfully for Nodes in that pool.
+	// A failure here often means a software bug or a non-stable cluster.
+	NodePoolValidGeneratedPayloadConditionType = "ValidGeneratedPayload"
+	// NodePoolValidPlatformImageType signals if an OS image, e.g. an AMI, was found successfully based on the consumer input, e.g. releaseImage.
+	// If the image is direct user input then this condition is meaningless.
+	// A failure here is unlikely to resolve without changing the user input.
+	NodePoolValidPlatformImageType = "ValidPlatformImage"
+	// NodePoolValidReleaseImageConditionType signals if the input in nodePool.spec.release.image is valid.
+	// A failure here is unlikely to resolve without changing the user input.
+	NodePoolValidReleaseImageConditionType = "ValidReleaseImage"
+	// NodePoolValidMachineConfigConditionType signals if the content within nodePool.spec.config is valid.
+	// A failure here is unlikely to resolve without changing the user input.
+	NodePoolValidMachineConfigConditionType = "ValidMachineConfig"
+	// NodePoolValidTuningConfigConditionType signals if the content within nodePool.spec.tuningConfig is valid.
+	// A failure here is unlikely to resolve without changing the user input.
+	NodePoolValidTuningConfigConditionType = "ValidTuningConfig"
+
+	// NodePoolUpdateManagementEnabledConditionType signals if the nodePool.spec.management input is valid.
+	// A failure here is unlikely to resolve without changing the user input.
+	NodePoolUpdateManagementEnabledConditionType = "UpdateManagementEnabled"
+	// NodePoolAutoscalingEnabledConditionType signals if nodePool.spec.replicas and nodePool.spec.AutoScaling input is valid.
+	// A failure here is unlikely to resolve without changing the user input.
+	NodePoolAutoscalingEnabledConditionType = "AutoscalingEnabled"
+	// NodePoolAutorepairEnabledConditionType signals if MachineHealthChecks resources were created successfully.
+	// A failure here often means a software bug or a non-stable cluster.
+	NodePoolAutorepairEnabledConditionType = "AutorepairEnabled"
+
+	// NodePoolUpdatingVersionConditionType signals if a version update is currently happening in NodePool.
+	NodePoolUpdatingVersionConditionType = "UpdatingVersion"
+	// NodePoolUpdatingConfigConditionType signals if a config update is currently happening in NodePool.
+	NodePoolUpdatingConfigConditionType = "UpdatingConfig"
+	// NodePoolUpdatingPlatformMachineTemplateConditionType signals if a platform machine template update is currently happening in NodePool.
+	NodePoolUpdatingPlatformMachineTemplateConditionType = "UpdatingPlatformMachineTemplate"
+	// NodePoolReadyConditionType bubbles up CAPI MachineDeployment/MachineSet Ready condition.
+	// This is true when all replicas are ready Nodes.
+	// When this is false for too long, NodePoolAllMachinesReadyConditionType and NodePoolAllNodesHealthyConditionType might provide more context.
+	NodePoolReadyConditionType = "Ready"
+	// NodePoolAllMachinesReadyConditionType bubbles up and aggregates CAPI Machine Ready condition.
+	// It signals when the infrastructure for a Machine resource was created successfully.
+	// https://github.com/kubernetes-sigs/cluster-api/blob/main/api/v1beta1/condition_consts.go
+	// A failure here may require external user intervention to resolve. E.g. hitting quotas on the cloud provider.
+	NodePoolAllMachinesReadyConditionType = "AllMachinesReady"
+	// NodePoolAllNodesHealthyConditionType bubbles up and aggregates CAPI NodeHealthy condition.
+	// It signals when the Node for a Machine resource is healthy.
+	// https://github.com/kubernetes-sigs/cluster-api/blob/main/api/v1beta1/condition_consts.go
+	// A failure here often means a software bug or a non-stable cluster.
+	NodePoolAllNodesHealthyConditionType = "AllNodesHealthy"
+
+	// NodePoolReconciliationActiveConditionType signals the state of nodePool.spec.pausedUntil.
+	NodePoolReconciliationActiveConditionType = "ReconciliationActive"
+
+	// NodePoolReachedIgnitionEndpoint signals if at least one instance was able to reach the ignition endpoint to get the payload.
+	// When this is false for too long, it may require external user intervention to resolve. E.g. enable AWS security groups to enable networking access.
+	NodePoolReachedIgnitionEndpoint = "ReachedIgnitionEndpoint"
+
+	// NodePoolAWSSecurityGroupAvailableConditionType signals whether the NodePool has an available security group to use.
+	// If the security group is specified for the NodePool, this condition is always true. If no security group is specified
+	// for the NodePool, the status of this condition depends on the availability of the default security group in the HostedCluster.
+	NodePoolAWSSecurityGroupAvailableConditionType = "AWSSecurityGroupAvailable"
+
+	// NodePoolValidMachineTemplateConditionType signals that the machine template created by the node pool is valid
+	NodePoolValidMachineTemplateConditionType = "ValidMachineTemplate"
+
+	// NodePoolClusterNetworkCIDRConflictType signals if a NodePool's machine objects are colliding with the
+	// cluster network's CIDR range. This can indicate why some network functionality might be degraded.
+	NodePoolClusterNetworkCIDRConflictType = "ClusterNetworkCIDRConflict"
+
+	// NodePoolKubeVirtLiveMigratableType indicates if all (VirtualMachine) nodes of the kubevirt
+	// hosted cluster can be live migrated without experiencing a node restart
+	NodePoolKubeVirtLiveMigratableType = "KubeVirtNodesLiveMigratable"
+)
+
+// PerformanceProfile Conditions
+const (
+
+	// NodePoolPerformanceProfileTuningConditionTypePrefix is a common prefix to all PerformanceProfile
+	// status conditions reported by NTO
+	NodePoolPerformanceProfileTuningConditionTypePrefix = "performance.operator.openshift.io"
+
+	// NodePoolPerformanceProfileTuningAvailableConditionType signals that the PerformanceProfile associated with the
+	// NodePool is available and its tunings are being applied successfully.
+	NodePoolPerformanceProfileTuningAvailableConditionType = NodePoolPerformanceProfileTuningConditionTypePrefix + "/Available"
+
+	// NodePoolPerformanceProfileTuningProgressingConditionType signals that the PerformanceProfile associated with the
+	// NodePool is in the middle of its tuning processing and is in a progressing state.
+	NodePoolPerformanceProfileTuningProgressingConditionType = NodePoolPerformanceProfileTuningConditionTypePrefix + "/Progressing"
+
+	// NodePoolPerformanceProfileTuningUpgradeableConditionType signals that it's safe to
+	// upgrade the PerformanceProfile operator component
+	NodePoolPerformanceProfileTuningUpgradeableConditionType = NodePoolPerformanceProfileTuningConditionTypePrefix + "/Upgradeable"
+
+	// NodePoolPerformanceProfileTuningDegradedConditionType signals that the PerformanceProfile associated with the
+	// NodePool failed to apply its tuning.
+	// This usually happens because lower-level components, like
+	// MachineConfig or KubeletConfig, failed to apply.
+	NodePoolPerformanceProfileTuningDegradedConditionType = NodePoolPerformanceProfileTuningConditionTypePrefix + "/Degraded"
+)
+
+// Reasons
+const (
+	NodePoolValidationFailedReason        = "ValidationFailed"
+	NodePoolInplaceUpgradeFailedReason    = "InplaceUpgradeFailed"
+	NodePoolNotFoundReason                = "NotFound"
+	NodePoolFailedToGetReason             = "FailedToGet"
+	IgnitionEndpointMissingReason         = "IgnitionEndpointMissing"
+	IgnitionCACertMissingReason           = "IgnitionCACertMissing"
+	IgnitionNotReached                    = "ignitionNotReached"
+	DefaultAWSSecurityGroupNotReadyReason = "DefaultSGNotReady"
+	NodePoolValidArchPlatform             = "ValidArchPlatform"
+	NodePoolInvalidArchPlatform           = "InvalidArchPlatform"
+	InvalidKubevirtMachineTemplate        = "InvalidKubevirtMachineTemplate"
+	InvalidOpenStackMachineTemplate       = "InvalidOpenStackMachineTemplate"
+	CIDRConflictReason                    = "CIDRConflict"
+	NodePoolKubeVirtLiveMigratableReason  = "KubeVirtNodesNotLiveMigratable"
+)
diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/nodepool_types.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/nodepool_types.go
new file mode 100644
index 000000000..c0f4a2c4c
--- /dev/null
+++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/nodepool_types.go
@@ -0,0 +1,534 @@
+package v1beta1
+
+import (
+	"fmt"
+	"strings"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/intstr"
+)
+
+const (
+	// TODO - for the next API bump; the case on these needs to be changed.
+	// See https://github.com/openshift/hypershift/pull/4538#discussion_r1765165827 for more details.
+	ArchitectureAMD64   = "amd64"
+	ArchitectureS390X   = "s390x"
+	ArchitecturePPC64LE = "ppc64le"
+	ArchitectureARM64   = "arm64"
+	ArchitectureMulti   = "multi"
+
+	// NodePoolLabel is used to label Nodes.
+	NodePoolLabel = "hypershift.openshift.io/nodePool"
+
+	// IgnitionServerTokenExpirationTimestampAnnotation holds the time that an ignition token expires and should be
+	// removed from the cluster.
+	IgnitionServerTokenExpirationTimestampAnnotation = "hypershift.openshift.io/ignition-token-expiration-timestamp"
+)
+
+var (
+	// ArchAliases contains the RHCOS release metadata aliases for the different architectures supported as API input.
+	ArchAliases = map[string]string{
+		ArchitectureAMD64: "x86_64",
+		ArchitectureARM64: "aarch64",
+	}
+)
+
+func init() {
+	SchemeBuilder.Register(func(scheme *runtime.Scheme) error {
+		scheme.AddKnownTypes(SchemeGroupVersion,
+			&NodePool{},
+			&NodePoolList{},
+		)
+		metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+		return nil
+	})
+}
+
+// +genclient
+
+// NodePool is a scalable set of worker nodes attached to a HostedCluster.
+// NodePool machine architectures are uniform within a given pool, and are
+// independent of the control plane's underlying machine architecture.
+//
+// +kubebuilder:resource:path=nodepools,shortName=np;nps,scope=Namespaced
+// +kubebuilder:storageversion
+// +kubebuilder:subresource:status
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas
+// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".spec.clusterName",description="Cluster"
+// +kubebuilder:printcolumn:name="Desired Nodes",type="integer",JSONPath=".spec.replicas",description="Desired Nodes"
+// +kubebuilder:printcolumn:name="Current Nodes",type="integer",JSONPath=".status.replicas",description="Available Nodes"
+// +kubebuilder:printcolumn:name="Autoscaling",type="string",JSONPath=".status.conditions[?(@.type==\"AutoscalingEnabled\")].status",description="Autoscaling Enabled"
+// +kubebuilder:printcolumn:name="Autorepair",type="string",JSONPath=".status.conditions[?(@.type==\"AutorepairEnabled\")].status",description="Node Autorepair Enabled"
+// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".status.version",description="Current version"
+// +kubebuilder:printcolumn:name="UpdatingVersion",type="string",JSONPath=".status.conditions[?(@.type==\"UpdatingVersion\")].status",description="UpdatingVersion in progress"
+// +kubebuilder:printcolumn:name="UpdatingConfig",type="string",JSONPath=".status.conditions[?(@.type==\"UpdatingConfig\")].status",description="UpdatingConfig in progress"
+// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="Message"
+type NodePool struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// Spec is the desired behavior of the NodePool.
+	Spec NodePoolSpec `json:"spec,omitempty"`
+
+	// Status is the latest observed status of the NodePool.
+	Status NodePoolStatus `json:"status,omitempty"`
+}
+
+// NodePoolSpec is the desired behavior of a NodePool.
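+//
+// As an illustrative sketch (values are examples only, and the NodePoolManagement
+// type and UpgradeTypeReplace constant are assumed from the surrounding package),
+// a fixed-size pool honoring the replicas/autoScaling mutual exclusion enforced
+// below could be expressed as:
+//
+//	replicas := int32(3)
+//	spec := NodePoolSpec{
+//		ClusterName: "example",
+//		Release:     Release{Image: "quay.io/openshift-release-dev/ocp-release:4.17.0-x86_64"},
+//		Platform:    NodePoolPlatform{Type: AWSPlatform},
+//		Management:  NodePoolManagement{UpgradeType: UpgradeTypeReplace},
+//		Replicas:    &replicas,
+//	}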
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.arch) || has(self.arch)", message="Arch is required once set"
+// +kubebuilder:validation:XValidation:rule="self.arch != 'arm64' || has(self.platform.aws) || has(self.platform.azure)", message="Setting Arch to arm64 is only supported for AWS and Azure"
+// +kubebuilder:validation:XValidation:rule="!has(self.replicas) || !has(self.autoScaling)", message="Both replicas or autoScaling should not be set"
+type NodePoolSpec struct {
+ // clusterName is the name of the HostedCluster this NodePool belongs to.
+ // If a HostedCluster with this name doesn't exist, the controller will no-op until it exists.
+ // +immutable
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="ClusterName is immutable"
+ // +required
+ // +kubebuilder:validation:MaxLength=253
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:XValidation:rule="self.matches('^[a-z0-9]([-a-z0-9]*[a-z0-9])?$')",message="clusterName must consist of lowercase alphanumeric characters or '-', start and end with an alphanumeric character, and be between 1 and 253 characters"
+ ClusterName string `json:"clusterName"`
+
+ // release specifies the OCP release used for the NodePool. This informs the
+ // ignition configuration for machines, which includes the kubelet version, as well as other platform specific
+ // machine properties (e.g. an AMI on the AWS platform).
+ // Using a release whose minor version skew against the Control Plane release is bigger than N-2 is not supported, although there's no enforcement that prevents this from happening.
+ // Attempting to use a release with a bigger skew might result in unpredictable behaviour.
+ // Attempting to use a release higher than the HostedCluster one will result in the NodePool being degraded and the ValidReleaseImage condition being false.
+ // Attempting to use a release lower than the current NodePool y-stream will result in the NodePool being degraded and the ValidReleaseImage condition being false.
+ // Changing this field will trigger a NodePool rollout.
+ // +rollout
+ // +required
+ Release Release `json:"release"`
+
+ // platform specifies the underlying infrastructure provider for the NodePool
+ // and is used to configure platform specific behavior.
+ // +required
+ Platform NodePoolPlatform `json:"platform"`
+
+ // replicas is the desired number of nodes the pool should maintain. If unset, the controller default value is 0.
+ // replicas is mutually exclusive with autoscaling. If autoscaling is configured, replicas must be omitted and autoscaling will control the NodePool size internally.
+ // +optional
+ Replicas *int32 `json:"replicas,omitempty"`
+
+ // management specifies behavior for managing nodes in the pool, such as
+ // upgrade strategies and auto-repair behaviors.
+ // +required
+ Management NodePoolManagement `json:"management"`
+
+ // autoscaling specifies auto-scaling behavior for the NodePool.
+ // autoscaling is mutually exclusive with replicas. If replicas is set, this field must be omitted.
+ //
+ // +optional
+ AutoScaling *NodePoolAutoScaling `json:"autoScaling,omitempty"`
+
+ // config is a list of references to ConfigMaps containing serialized
+ // MachineConfig resources to be injected into the ignition configurations of
+ // nodes in the NodePool. The MachineConfig API schema is defined here:
+ //
+ // https://github.com/openshift/machine-config-operator/blob/18963e4f8fe66e8c513ca4b131620760a414997f/pkg/apis/machineconfiguration.openshift.io/v1/types.go#L185
+ //
+ // Each ConfigMap must have a single key named "config" whose value is the YAML
+ // with one or more serialized machineconfiguration.openshift.io resources:
+ //
+ // * KubeletConfig
+ // * ContainerRuntimeConfig
+ // * MachineConfig
+ // * ClusterImagePolicy
+ // * ImageContentSourcePolicy
+ // * ImageDigestMirrorSet
+ //
+ // This is validated in the backend and signaled back via the validMachineConfig condition.
+ // Changing this field will trigger a NodePool rollout.
+ // +rollout
+ // +kubebuilder:validation:Optional
+ Config []corev1.LocalObjectReference `json:"config,omitempty"`
+
+ // nodeDrainTimeout is the maximum amount of time that the controller will spend on retrying to drain a node until it succeeds.
+ // The default value is 0, meaning that the drain can be retried without any time limitation.
+ // Changes to this field propagate in place to existing Nodes.
+ // +optional
+ NodeDrainTimeout *metav1.Duration `json:"nodeDrainTimeout,omitempty"`
+
+ // nodeVolumeDetachTimeout is the maximum amount of time that the controller will spend on detaching volumes from a node.
+ // The default value is 0, meaning that the volumes will be detached from the node without any time limitation.
+ // After the timeout, any remaining attached volumes will be ignored and the removal of the machine will continue.
+ // Changes to this field propagate in place to existing Nodes.
+ // +optional
+ NodeVolumeDetachTimeout *metav1.Duration `json:"nodeVolumeDetachTimeout,omitempty"`
+
+ // nodeLabels propagates a list of labels to Nodes, only once on creation.
+ // Valid values are those in https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set
+ // +optional
+ NodeLabels map[string]string `json:"nodeLabels,omitempty"`
+
+ // taints, if specified, propagates a list of taints to Nodes, only once on creation.
+ // These taints are additive to the ones applied by other controllers.
+ // +kubebuilder:validation:MaxItems=50
+ // +optional
+ Taints []Taint `json:"taints,omitempty"`
+
+ // pausedUntil is a field that can be used to pause reconciliation on the NodePool controller, resulting in any change to the NodePool being ignored.
+ // Either a date can be provided in RFC3339 format or a boolean as in 'true', 'false', 'True', 'False'. If a date is
+ // provided: reconciliation is paused on the resource until that date. If the boolean true is
+ // provided: reconciliation is paused on the resource until the field is removed.
+ // +kubebuilder:validation:MaxLength=35
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:XValidation:rule=`self.matches('^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.*$') || self in ['true', 'false', 'True', 'False']`,message="PausedUntil must be a date in RFC3339 format or 'True', 'true', 'False' or 'false'"
+ // +optional
+ PausedUntil *string `json:"pausedUntil,omitempty"`
+
+ // tuningConfig is a list of references to ConfigMaps containing serialized
+ // Tuned or PerformanceProfile resources to define the tuning configuration to be applied to
+ // nodes in the NodePool. The Tuned API is defined here:
+ //
+ // https://github.com/openshift/cluster-node-tuning-operator/blob/2c76314fb3cc8f12aef4a0dcd67ddc3677d5b54f/pkg/apis/tuned/v1/tuned_types.go
+ //
+ // The PerformanceProfile API is defined here:
+ // https://github.com/openshift/cluster-node-tuning-operator/tree/b41042d42d4ba5bb2e99960248cf1d6ae4935018/pkg/apis/performanceprofile/v2
+ //
+ // Each ConfigMap must have a single key named "tuning" whose value is the
+ // JSON or YAML of a serialized Tuned or PerformanceProfile.
+ // Changing this field will trigger a NodePool rollout.
+ // +kubebuilder:validation:Optional
+ TuningConfig []corev1.LocalObjectReference `json:"tuningConfig,omitempty"`
+
+ // arch is the preferred processor architecture for the NodePool. Different platforms might have different supported architectures.
+ // TODO: This is set as optional to prevent validation from failing due to a limitation on client side validation with open API machinery:
+ // https://github.com/kubernetes/kubernetes/issues/108768#issuecomment-1253912215
+ // TODO Add s390x to enum validation once the architecture is supported
+ //
+ // +kubebuilder:default:=amd64
+ // +kubebuilder:validation:Enum=arm64;amd64;ppc64le
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Arch is immutable"
+ // +optional
+ Arch string `json:"arch,omitempty"`
+}
+
+// NodePoolStatus is the latest observed status of a NodePool.
+type NodePoolStatus struct {
+ // Replicas is the latest observed number of nodes in the pool.
+ //
+ // +optional
+ Replicas int32 `json:"replicas"`
+
+ // Version is the semantic version of the latest applied release specified by
+ // the NodePool.
+ //
+ // +kubebuilder:validation:Optional
+ Version string `json:"version,omitempty"`
+
+ // Platform holds the platform-specific statuses.
+ Platform *NodePoolPlatformStatus `json:"platform,omitempty"`
+
+ // Conditions represents the latest available observations of the node pool's
+ // current state.
+ // +optional
+ Conditions []NodePoolCondition `json:"conditions,omitempty"`
+}
+
+// NodePoolList contains a list of NodePools.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type NodePoolList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []NodePool `json:"items"`
+}
+
+// UpgradeType is a type of high-level upgrade behavior for nodes in a NodePool.
+type UpgradeType string
+
+const (
+ // UpgradeTypeReplace is a strategy which replaces nodes using surge node
+ // capacity.
+ UpgradeTypeReplace = UpgradeType("Replace")
+
+ // UpgradeTypeInPlace is a strategy which replaces nodes in-place with no
+ // additional node capacity requirements.
+ UpgradeTypeInPlace = UpgradeType("InPlace")
+)
+
+func (p *UpgradeType) String() string {
+ return string(*p)
+}
+
+func (p *UpgradeType) Set(s string) error {
+ switch strings.ToLower(s) {
+ case "replace":
+ *p = UpgradeTypeReplace
+ case "inplace":
+ *p = UpgradeTypeInPlace
+ default:
+ return fmt.Errorf("unknown upgrade type used '%s'", s)
+ }
+ return nil
+}
+
+func (p *UpgradeType) Type() string {
+ return "UpgradeType"
+}
+
+// UpgradeStrategy is a specific strategy for upgrading nodes in a NodePool.
+type UpgradeStrategy string
+
+const (
+ // UpgradeStrategyRollingUpdate means use a rolling update for nodes.
+ UpgradeStrategyRollingUpdate = UpgradeStrategy("RollingUpdate")
+
+ // UpgradeStrategyOnDelete replaces old nodes when the deletion of the
+ // associated node instances is completed.
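+ //
+ // Illustrative example (a minimal sketch; ptr is assumed to be
+ // k8s.io/utils/ptr): choosing between the two strategies:
+ //
+ //	rolling := ReplaceUpgrade{
+ //		Strategy: UpgradeStrategyRollingUpdate,
+ //		RollingUpdate: &RollingUpdate{
+ //			MaxSurge:       ptr.To(intstr.FromInt32(1)),
+ //			MaxUnavailable: ptr.To(intstr.FromInt32(0)),
+ //		},
+ //	}
+ //	onDelete := ReplaceUpgrade{Strategy: UpgradeStrategyOnDelete}
+ //	_, _ = rolling, onDelete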
+ UpgradeStrategyOnDelete = UpgradeStrategy("OnDelete")
+)
+
+// ReplaceUpgrade specifies upgrade behavior that replaces existing nodes
+// according to a given strategy.
+// +kubebuilder:validation:XValidation:rule="!has(self.rollingUpdate) || self.strategy == 'RollingUpdate'", message="The 'rollingUpdate' field can only be set when 'strategy' is 'RollingUpdate'"
+type ReplaceUpgrade struct {
+ // strategy is the node replacement strategy for nodes in the pool.
+ // It can be either "RollingUpdate" or "OnDelete". RollingUpdate will roll out Nodes honoring maxSurge and maxUnavailable.
+ // OnDelete provides more granular control and will replace nodes as the old ones are manually deleted.
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Enum=RollingUpdate;OnDelete
+ Strategy UpgradeStrategy `json:"strategy"`
+
+ // rollingUpdate specifies a rolling update strategy which upgrades nodes by
+ // creating new nodes and deleting the old ones.
+ //
+ // +kubebuilder:validation:Optional
+ RollingUpdate *RollingUpdate `json:"rollingUpdate,omitempty"`
+}
+
+// RollingUpdate specifies a rolling update strategy which upgrades nodes by
+// creating new nodes and deleting the old ones.
+type RollingUpdate struct {
+ // maxUnavailable is the maximum number of nodes that can be unavailable
+ // during the update.
+ //
+ // Value can be an absolute number (ex: 5) or a percentage of desired nodes
+ // (ex: 10%).
+ //
+ // Absolute number is calculated from percentage by rounding down.
+ //
+ // This can not be 0 if MaxSurge is 0.
+ //
+ // Defaults to 0.
+ //
+ // Example: when this is set to 30%, old nodes can be deleted down to 70% of
+ // desired nodes immediately when the rolling update starts. Once new nodes
+ // are ready, more old nodes can be deleted, followed by provisioning new nodes,
+ // ensuring that the total number of nodes available at all times during the
+ // update is at least 70% of desired nodes.
+ //
+ // +optional
+ MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
+
+ // maxSurge is the maximum number of nodes that can be provisioned above the
+ // desired number of nodes.
+ //
+ // Value can be an absolute number (ex: 5) or a percentage of desired nodes
+ // (ex: 10%).
+ //
+ // Absolute number is calculated from percentage by rounding up.
+ //
+ // This can not be 0 if MaxUnavailable is 0.
+ //
+ // Defaults to 1.
+ //
+ // Example: when this is set to 30%, new nodes can be provisioned immediately
+ // when the rolling update starts, such that the total number of old and new
+ // nodes does not exceed 130% of desired nodes. Once old nodes have been
+ // deleted, new nodes can be provisioned, ensuring that the total number of nodes
+ // running at any time during the update is at most 130% of desired nodes.
+ //
+ // +optional
+ MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"`
+}
+
+// InPlaceUpgrade specifies an upgrade strategy which upgrades nodes in-place
+// without any new nodes being created or any old nodes being deleted.
+type InPlaceUpgrade struct {
+ // maxUnavailable is the maximum number of nodes that can be unavailable
+ // during the update.
+ //
+ // Value can be an absolute number (ex: 5) or a percentage of desired nodes
+ // (ex: 10%).
+ //
+ // Absolute number is calculated from percentage by rounding down.
+ //
+ // Defaults to 1.
+ //
+ // Example: when this is set to 30%, a max of 30% of the nodes can be made
+ // unschedulable/unavailable immediately when the update starts. Once a set
+ // of nodes is updated, more nodes can be made unschedulable for update,
+ // ensuring that the total number of nodes schedulable at all times during
+ // the update is at least 70% of desired nodes.
+ //
+ // +optional
+ MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
+}
+
+// NodePoolManagement specifies behavior for managing nodes in a NodePool, such
+// as upgrade strategies and auto-repair behaviors.
+// +kubebuilder:validation:XValidation:rule="!has(self.inPlace) || self.upgradeType == 'InPlace'", message="The 'inPlace' field can only be set when 'upgradeType' is 'InPlace'"
+type NodePoolManagement struct {
+ // upgradeType specifies the type of strategy for handling upgrades.
+ // This can be either "Replace" or "InPlace".
+ // "Replace" will update Nodes by recreating the underlying instances.
+ // "InPlace" will update Nodes by applying changes to the existing instances. This might or might not result in a reboot.
+ //
+ // +kubebuilder:validation:Enum=Replace;InPlace
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="UpgradeType is immutable"
+ // +required
+ UpgradeType UpgradeType `json:"upgradeType"`
+
+ // replace is the configuration for rolling upgrades.
+ // It defaults to a RollingUpdate strategy with maxSurge of 1 and maxUnavailable of 0.
+ //
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default={strategy: "RollingUpdate", rollingUpdate: {maxSurge: 1, maxUnavailable: 0 }}
+ Replace *ReplaceUpgrade `json:"replace,omitempty"`
+
+ // inPlace is the configuration for in-place upgrades.
+ //
+ // +kubebuilder:validation:Optional
+ InPlace *InPlaceUpgrade `json:"inPlace,omitempty"`
+
+ // autoRepair specifies whether health checks should be enabled for machines in the NodePool. The default is false.
+ // Enabling this feature will cause the controller to automatically delete unhealthy machines.
+ // The unhealthy criteria are reserved for the controller implementation and subject to change,
+ // but generally a machine is considered unhealthy by checking whether the Node Ready condition is true, with a timeout that might vary depending on the platform provider.
+ // AutoRepair will no-op when more than 2 Nodes are unhealthy at the same time, giving time for the cluster to stabilize or for the user to manually intervene.
+ // +optional
+ // +kubebuilder:default=false
+ AutoRepair bool `json:"autoRepair"`
+}
+
+// NodePoolAutoScaling specifies auto-scaling behavior for a NodePool.
+// +kubebuilder:validation:XValidation:rule="self.max >= self.min", message="max must be equal or greater than min"
+type NodePoolAutoScaling struct {
+ // Min is the minimum number of nodes to maintain in the pool. Must be >= 1 and <= Max.
+ //
+ // +kubebuilder:validation:Minimum=1
+ Min int32 `json:"min"`
+
+ // Max is the maximum number of nodes allowed in the pool. Must be >= 1 and >= Min.
+ //
+ // +kubebuilder:validation:Minimum=1
+ Max int32 `json:"max"`
+}
+
+// NodePoolPlatform specifies the underlying infrastructure provider for the
+// NodePool and is used to configure platform specific behavior.
+type NodePoolPlatform struct {
+ // Type specifies the platform name.
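+ //
+ // Illustrative example (a minimal sketch; KubevirtPlatform and
+ // KubevirtNodePoolPlatform are assumed to be defined elsewhere in this
+ // package): exactly one platform member should be set, matching Type:
+ //
+ //	platform := NodePoolPlatform{
+ //		Type:     KubevirtPlatform,
+ //		Kubevirt: &KubevirtNodePoolPlatform{},
+ //	}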
+ // + // +unionDiscriminator + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Type is immutable" + // +immutable + // +openshift:validation:FeatureGateAwareEnum:featureGate="",enum=AWS;Azure;IBMCloud;KubeVirt;Agent;PowerVS;None + // +openshift:validation:FeatureGateAwareEnum:featureGate=OpenStack,enum=AWS;Azure;IBMCloud;KubeVirt;Agent;PowerVS;None;OpenStack + Type PlatformType `json:"type"` + + // AWS specifies the configuration used when operating on AWS. + // + // +optional + AWS *AWSNodePoolPlatform `json:"aws,omitempty"` + + // IBMCloud defines IBMCloud specific settings for components + IBMCloud *IBMCloudPlatformSpec `json:"ibmcloud,omitempty"` + + // Kubevirt specifies the configuration used when operating on KubeVirt platform. + // + // +optional + Kubevirt *KubevirtNodePoolPlatform `json:"kubevirt,omitempty"` + + // Agent specifies the configuration used when using Agent platform. + // + // +optional + Agent *AgentNodePoolPlatform `json:"agent,omitempty"` + + Azure *AzureNodePoolPlatform `json:"azure,omitempty"` + + // PowerVS specifies the configuration used when using IBMCloud PowerVS platform. + // + // +optional + PowerVS *PowerVSNodePoolPlatform `json:"powervs,omitempty"` + + // OpenStack specifies the configuration used when using OpenStack platform. + // +optional + // +openshift:enable:FeatureGate=OpenStack + OpenStack *OpenStackNodePoolPlatform `json:"openstack,omitempty"` +} + +// We define our own condition type since metav1.Condition has validation +// for Reason that might be broken by what we bubble up from CAPI. +// NodePoolCondition defines an observation of NodePool resource operational state. +type NodePoolCondition struct { + // Type of condition in CamelCase or in foo.example.com/CamelCase. + // Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + // can be useful (see .node.status.conditions), the ability to deconflict is important. + Type string `json:"type"` + + // Status of the condition, one of True, False, Unknown. + Status corev1.ConditionStatus `json:"status"` + + // Severity provides an explicit classification of Reason code, so the users or machines can immediately + // understand the current situation and act accordingly. + // The Severity field MUST be set only when Status=False. + // +optional + Severity string `json:"severity,omitempty"` + + // Last time the condition transitioned from one status to another. + // This should be when the underlying condition changed. If that is not known, then using the time when + // the API field changed is acceptable. + LastTransitionTime metav1.Time `json:"lastTransitionTime"` + + // The reason for the condition's last transition in CamelCase. + // The specific API may choose whether or not this field is considered a guaranteed API. + // This field may not be empty. + // +optional + Reason string `json:"reason,omitempty"` + + // A human readable message indicating details about the transition. + // This field may be empty. + // +optional + Message string `json:"message,omitempty"` + + // +kubebuilder:validation:Minimum=0 + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} + +// taint is as v1 Core but without TimeAdded. 
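+//
+// Illustrative example (a minimal sketch; the key and value are placeholders
+// that satisfy the validation rules declared on the fields below):
+//
+//	t := Taint{
+//		Key:    "example.com/dedicated",
+//		Value:  "gpu",
+//		Effect: corev1.TaintEffectNoSchedule,
+//	}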
+// https://github.com/kubernetes/kubernetes/blob/ed8cad1e80d096257921908a52ac69cf1f41a098/staging/src/k8s.io/api/core/v1/types.go#L3037-L3053 +// Validation replicates the same validation as the upstream https://github.com/kubernetes/kubernetes/blob/9a2a7537f035969a68e432b4cc276dbce8ce1735/pkg/util/taints/taints.go#L273. +// See also https://kubernetes.io/docs/concepts/overview/working-with-objects/names/. +type Taint struct { + // key is the taint key to be applied to a node. + // +required + // +kubebuilder:validation:XValidation:rule=`self.matches('^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\\/)?[A-Za-z0-9]([-A-Za-z0-9_.]{0,61}[A-Za-z0-9])?$')`,message="key must be a qualified name with an optional subdomain prefix e.g. example.com/MyName" + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:MinLength=1 + Key string `json:"key"` + + // value is the taint value corresponding to the taint key. + // +optional + // +kubebuilder:validation:XValidation:rule=`self.matches('^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$')`,message="Value must start and end with alphanumeric characters and can only contain '-', '_', '.' in the middle" + // +kubebuilder:validation:MaxLength=253 + Value string `json:"value,omitempty"` + // +required + // effect is the effect of the taint on pods + // that do not tolerate the taint. + // Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + // +kubebuilder:validation:Enum=NoSchedule;PreferNoSchedule;NoExecute + Effect corev1.TaintEffect `json:"effect"` +} + +// NodePoolPlatformStatus contains specific platform statuses +type NodePoolPlatformStatus struct { + // KubeVirt contains the KubeVirt platform statuses + // +optional + KubeVirt *KubeVirtNodePoolStatus `json:"kubeVirt,omitempty"` +} diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/openstack.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/openstack.go new file mode 100644 index 000000000..ca9adac61 --- /dev/null +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/openstack.go @@ -0,0 +1,407 @@ +package v1beta1 + +// PortSecurityPolicy defines whether or not to enable port security on a port. +type PortSecurityPolicy string + +const ( + // PortSecurityEnabled enables port security on a port. + PortSecurityEnabled PortSecurityPolicy = "Enabled" + + // PortSecurityDisabled disables port security on a port. + PortSecurityDisabled PortSecurityPolicy = "Disabled" + + // PortSecurityDefault uses the default port security policy. + PortSecurityDefault PortSecurityPolicy = "" +) + +type OpenStackNodePoolPlatform struct { + // Flavor is the OpenStack flavor to use for the node instances. + // + // +kubebuilder:validation:Required + // +required + Flavor string `json:"flavor"` + + // ImageName is the OpenStack Glance image name to use for node instances. If unspecified, the default + // is chosen based on the NodePool release payload image. + // + // +optional + ImageName string `json:"imageName,omitempty"` + + // availabilityZone is the nova availability zone in which the provider will create the VM. + // If not specified, the VM will be created in the default availability zone specified in the nova configuration. + // Availability zone names must NOT contain : since it is used by admin users to specify hosts where instances + // are launched in server creation. 
Also, it must not contain spaces; otherwise nodes that belong
+ // to this availability zone will fail to register, see kubernetes/cloud-provider-openstack#1379 for further information.
+ // The maximum length of an availability zone name is 63 characters, as per label limits.
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:Pattern=`^[^: ]*$`
+ // +kubebuilder:validation:MaxLength=63
+ // +optional
+ AvailabilityZone string `json:"availabilityZone,omitempty"`
+
+ // AdditionalPorts is a list of additional ports to create on the node instances.
+ //
+ // +optional
+ AdditionalPorts []PortSpec `json:"additionalPorts,omitempty"`
+}
+
+// OpenStackPlatformSpec specifies configuration for clusters running on OpenStack.
+type OpenStackPlatformSpec struct {
+ // IdentityRef is a reference to a secret holding OpenStack credentials
+ // to be used when reconciling the hosted cluster.
+ //
+ // +kubebuilder:validation:Required
+ // +required
+ IdentityRef OpenStackIdentityReference `json:"identityRef"`
+
+ // ManagedSubnets describes the OpenStack Subnet to be created. The cluster actuator will create a network,
+ // a subnet with the defined DNSNameservers, AllocationPools and the CIDR defined in the HostedCluster
+ // MachineNetwork, and a router connected to the subnet. Currently only one IPv4
+ // subnet is supported.
+ //
+ // +kubebuilder:validation:MaxItems=1
+ // +listType=atomic
+ // +optional
+ ManagedSubnets []SubnetSpec `json:"managedSubnets,omitempty"`
+
+ // Router specifies an existing router to be used if ManagedSubnets are
+ // specified. If specified, no new router will be created.
+ //
+ // +optional
+ Router *RouterParam `json:"router,omitempty"`
+
+ // Network specifies an existing network to use if no ManagedSubnets
+ // are specified.
+ // +optional
+ Network *NetworkParam `json:"network,omitempty"`
+
+ // Subnets specifies existing subnets to use if no ManagedSubnets are
+ // specified. All subnets must be in the network specified by Network.
+ // There can be zero, one, or two subnets. If no subnets are specified,
+ // all subnets in Network will be used. If two subnets are specified, one
+ // must be IPv4 and the other IPv6.
+ //
+ // +kubebuilder:validation:MaxItems=2
+ // +listType=atomic
+ // +optional
+ Subnets []SubnetParam `json:"subnets,omitempty"`
+
+ // NetworkMTU sets the maximum transmission unit (MTU) value to address fragmentation for the private network ID.
+ // This value will be used only if the Cluster actuator creates the network.
+ // If left empty, the network will have the default MTU defined in the OpenStack network service.
+ // To use this field, the OpenStack installation requires the net-mtu neutron API extension.
+ //
+ // +optional
+ NetworkMTU *int `json:"networkMTU,omitempty"`
+
+ // ExternalNetwork is the OpenStack Network to be used to get public internet to the VMs.
+ // This option is ignored if DisableExternalNetwork is set to true.
+ //
+ // If ExternalNetwork is defined it must refer to exactly one external network.
+ //
+ // If ExternalNetwork is not defined or is empty the controller will use any
+ // existing external network as long as there is only one. It is an
+ // error if ExternalNetwork is not defined and there are multiple
+ // external networks unless DisableExternalNetwork is also set.
+ //
+ // If ExternalNetwork is not defined and there are no external networks
+ // the controller will proceed as though DisableExternalNetwork was set.
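+ //
+ // Illustrative example (a minimal sketch; the UUID and network name are
+ // placeholders, and ptr is assumed to be k8s.io/utils/ptr): a NetworkParam
+ // selects the network either by ID or by filter, never both:
+ //
+ //	byID := NetworkParam{ID: ptr.To("6ba7b810-9dad-11d1-80b4-00c04fd430c8")}
+ //	byName := NetworkParam{Filter: &NetworkFilter{Name: "public"}}
+ //	_, _ = byID, byName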
+ //
+ // +optional
+ ExternalNetwork *NetworkParam `json:"externalNetwork,omitempty"`
+
+ // DisableExternalNetwork specifies whether or not to attempt to connect the cluster
+ // to an external network. This allows for the creation of clusters when connecting
+ // to an external network is not possible or desirable, e.g. if using a provider network.
+ //
+ // +optional
+ DisableExternalNetwork *bool `json:"disableExternalNetwork,omitempty"`
+
+ // Tags to set on all resources in the cluster which support tags.
+ //
+ // +listType=set
+ // +optional
+ Tags []string `json:"tags,omitempty"`
+}
+
+// OpenStackIdentityReference is a reference to an infrastructure
+// provider identity to be used to provision cluster resources.
+type OpenStackIdentityReference struct {
+ // Name is the name of a secret in the same namespace as the resource being provisioned.
+ // The secret must contain a key named `clouds.yaml` which contains an OpenStack clouds.yaml file.
+ // The secret may optionally contain a key named `cacert` containing a PEM-encoded CA certificate.
+ //
+ // +kubebuilder:validation:Required
+ // +required
+ Name string `json:"name"`
+
+ // CloudName specifies the name of the entry in the clouds.yaml file to use.
+ //
+ // +kubebuilder:validation:Required
+ // +required
+ CloudName string `json:"cloudName"`
+}
+
+type SubnetSpec struct {
+ // DNSNameservers holds a list of DNS server addresses that will be provided when creating
+ // the subnet. These addresses need to have the same IP version as CIDR.
+ //
+ // +optional
+ DNSNameservers []string `json:"dnsNameservers,omitempty"`
+
+ // AllocationPools is an array of AllocationPool objects that will be applied to the OpenStack Subnet being created.
+ // If set, OpenStack will only allocate these IPs for Machines. It will still be possible to create ports from
+ // outside of these ranges manually.
+ //
+ // +optional
+ AllocationPools []AllocationPool `json:"allocationPools,omitempty"`
+}
+
+type AllocationPool struct {
+ // Start represents the start of the AllocationPool, that is the lowest IP of the pool.
+ //
+ // +kubebuilder:validation:Required
+ // +required
+ Start string `json:"start"`
+
+ // End represents the end of the AllocationPool, that is the highest IP of the pool.
+ //
+ // +kubebuilder:validation:Required
+ // +required
+ End string `json:"end"`
+}
+
+// RouterParam specifies an OpenStack router to use. It may be specified by either ID or filter, but not both.
+// +kubebuilder:validation:MaxProperties:=1
+// +kubebuilder:validation:MinProperties:=1
+type RouterParam struct {
+ // ID is the ID of the router to use. If ID is provided, the other filters cannot be provided. Must be in UUID format.
+ //
+ // +kubebuilder:validation:Format:=uuid
+ // +optional
+ ID *string `json:"id,omitempty"`
+
+ // Filter specifies a filter to select an OpenStack router. If provided, cannot be empty.
+ //
+ // +optional
+ Filter *RouterFilter `json:"filter,omitempty"`
+}
+
+// RouterFilter specifies a query to select an OpenStack router. At least one property must be set.
+// +kubebuilder:validation:MinProperties:=1
+type RouterFilter struct {
+ // Name is the name of the router to filter by.
+ //
+ // +optional
+ Name string `json:"name,omitempty"`
+
+ // Description is the description of the router to filter by.
+ //
+ // +optional
+ Description string `json:"description,omitempty"`
+
+ // ProjectID is the project ID of the router to filter by.
+ // + // +optional + ProjectID string `json:"projectID,omitempty"` + + // FilterByNeutronTags specifies tags to filter by. + // + // +optional + FilterByNeutronTags `json:",inline"` +} + +// NetworkParam specifies an OpenStack network. It may be specified by either ID or Filter, but not both. +// +kubebuilder:validation:MaxProperties:=1 +// +kubebuilder:validation:MinProperties:=1 +type NetworkParam struct { + // ID is the ID of the network to use. If ID is provided, the other filters cannot be provided. Must be in UUID format. + // + // +kubebuilder:validation:Format:=uuid + // +optional + ID *string `json:"id,omitempty"` + + // Filter specifies a filter to select an OpenStack network. If provided, cannot be empty. + // + // +optional + Filter *NetworkFilter `json:"filter,omitempty"` +} + +// NetworkFilter specifies a query to select an OpenStack network. At least one property must be set. +// +kubebuilder:validation:MinProperties:=1 +type NetworkFilter struct { + // Name is the name of the network to filter by. + // + // +optional + Name string `json:"name,omitempty"` + + // Description is the description of the network to filter by. + // + // +optional + Description string `json:"description,omitempty"` + + // ProjectID is the project ID of the network to filter by. + // + // +optional + ProjectID string `json:"projectID,omitempty"` + + // FilterByNeutronTags specifies tags to filter by. + // + // +optional + FilterByNeutronTags `json:",inline"` +} + +// NeutronTag represents a tag on a Neutron resource. +// It may not be empty and may not contain commas. +// +kubebuilder:validation:Pattern:="^[^,]+$" +// +kubebuilder:validation:MinLength:=1 +type NeutronTag string + +type FilterByNeutronTags struct { + // Tags is a list of tags to filter by. If specified, the resource must + // have all of the tags specified to be included in the result. + // + // +listType=set + // +optional + Tags []NeutronTag `json:"tags,omitempty"` + + // TagsAny is a list of tags to filter by. If specified, the resource + // must have at least one of the tags specified to be included in the + // result. + // + // +listType=set + // +optional + TagsAny []NeutronTag `json:"tagsAny,omitempty"` + + // NotTags is a list of tags to filter by. If specified, resources which + // contain all of the given tags will be excluded from the result. + // + // +listType=set + // +optional + NotTags []NeutronTag `json:"notTags,omitempty"` + + // NotTagsAny is a list of tags to filter by. If specified, resources + // which contain any of the given tags will be excluded from the result. + // + // +listType=set + // +optional + NotTagsAny []NeutronTag `json:"notTagsAny,omitempty"` +} + +// SubnetParam specifies an OpenStack subnet to use. It may be specified by either ID or filter, but not both. +// +kubebuilder:validation:MaxProperties:=1 +// +kubebuilder:validation:MinProperties:=1 +type SubnetParam struct { + // ID is the uuid of the subnet. It will not be validated. + // + // +kubebuilder:validation:Format:=uuid + // +optional + ID *string `json:"id,omitempty"` + + // Filter specifies a filter to select the subnet. It must match exactly one subnet. + // + // +optional + Filter *SubnetFilter `json:"filter,omitempty"` +} + +// SubnetFilter specifies a filter to select a subnet. At least one parameter must be specified. +// +kubebuilder:validation:MinProperties:=1 +type SubnetFilter struct { + // Name is the name of the subnet to filter by. 
+ // + // +optional + Name string `json:"name,omitempty"` + // Description is the description of the subnet to filter by. + // + // +optional + Description string `json:"description,omitempty"` + + // ProjectID is the project ID of the subnet to filter by. + // + // +optional + ProjectID string `json:"projectID,omitempty"` + + // IPVersion is the IP version of the subnet to filter by. + // + // +optional + IPVersion int `json:"ipVersion,omitempty"` + + // GatewayIP is the gateway IP of the subnet to filter by. + // + // +optional + GatewayIP string `json:"gatewayIP,omitempty"` + + // CIDR is the CIDR of the subnet to filter by. + // + // +optional + CIDR string `json:"cidr,omitempty"` + + // IPv6AddressMode is the IPv6 address mode of the subnet to filter by. + // + // +optional + IPv6AddressMode string `json:"ipv6AddressMode,omitempty"` + + // IPv6RAMode is the IPv6 RA mode of the subnet to filter by. + // + // +optional + IPv6RAMode string `json:"ipv6RAMode,omitempty"` + + // FilterByNeutronTags specifies tags to filter by. + // + // +optional + FilterByNeutronTags `json:",inline"` +} + +// PortSpec specifies the options for creating a port. +type PortSpec struct { + // Network is a query for an openstack network that the port will be created or discovered on. + // This will fail if the query returns more than one network. + // + // +optional + Network *NetworkParam `json:"network,omitempty"` + + // Description is a human-readable description for the port. + // + // +optional + Description string `json:"description,omitempty"` + + // AllowedAddressPairs is a list of address pairs which Neutron will + // allow the port to send traffic from in addition to the port's + // addresses. If not specified, the MAC Address will be the MAC Address + // of the port. Depending on the configuration of Neutron, it may be + // supported to specify a CIDR instead of a specific IP address. + // + // +optional + AllowedAddressPairs []AddressPair `json:"allowedAddressPairs,omitempty"` + + // VNICType specifies the type of vNIC which this port should be + // attached to. This is used to determine which mechanism driver(s) to + // be used to bind the port. The valid values are normal, macvtap, + // direct, baremetal, direct-physical, virtio-forwarder, smart-nic and + // remote-managed, although these values will not be validated in this + // API to ensure compatibility with future neutron changes or custom + // implementations. What type of vNIC is actually available depends on + // deployments. If not specified, the Neutron default value is used. + // + // +optional + VNICType string `json:"vnicType,omitempty"` + + // PortSecurityPolicy specifies whether or not to enable port security on the port. + // Allowed values are "Enabled", "Disabled" and omitted. + // When not set, it takes the value of the corresponding field at the network level. + // + // +kubebuilder:validation:Enum:=Enabled;Disabled;"" + // +optional + PortSecurityPolicy PortSecurityPolicy `json:"portSecurityPolicy,omitempty"` +} + +type AddressPair struct { + // IPAddress is the IP address of the allowed address pair. Depending on + // the configuration of Neutron, it may be supported to specify a CIDR + // instead of a specific IP address. 
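+ //
+ // Illustrative example (a minimal sketch; the network name and VIP address
+ // are placeholders): an additional port that may carry traffic from a VIP
+ // address the node does not own:
+ //
+ //	port := PortSpec{
+ //		Network:             &NetworkParam{Filter: &NetworkFilter{Name: "internal"}},
+ //		AllowedAddressPairs: []AddressPair{{IPAddress: "192.0.2.10"}},
+ //	}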
+ //
+ // +kubebuilder:validation:Required
+ // +required
+ IPAddress string `json:"ipAddress"`
+}
diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/powervs.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/powervs.go
new file mode 100644
index 000000000..24fa77149
--- /dev/null
+++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/powervs.go
@@ -0,0 +1,295 @@
+package v1beta1
+
+import (
+ "fmt"
+
+ "github.com/openshift/hypershift/api/ibmcapi"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// PowerVSNodePoolProcType defines the processor type to be used for PowerVSNodePoolPlatform
+type PowerVSNodePoolProcType string
+
+func (p *PowerVSNodePoolProcType) String() string {
+ return string(*p)
+}
+
+func (p *PowerVSNodePoolProcType) Set(s string) error {
+ switch s {
+ case string(PowerVSNodePoolSharedProcType), string(PowerVSNodePoolCappedProcType), string(PowerVSNodePoolDedicatedProcType):
+ *p = PowerVSNodePoolProcType(s)
+ return nil
+ default:
+ return fmt.Errorf("unknown processor type used %s", s)
+ }
+}
+
+func (p *PowerVSNodePoolProcType) Type() string {
+ return "PowerVSNodePoolProcType"
+}
+
+const (
+ // PowerVSNodePoolDedicatedProcType defines the dedicated processor type
+ PowerVSNodePoolDedicatedProcType = PowerVSNodePoolProcType("dedicated")
+
+ // PowerVSNodePoolSharedProcType defines the shared processor type
+ PowerVSNodePoolSharedProcType = PowerVSNodePoolProcType("shared")
+
+ // PowerVSNodePoolCappedProcType defines the capped processor type
+ PowerVSNodePoolCappedProcType = PowerVSNodePoolProcType("capped")
+)
+
+func (p *PowerVSNodePoolProcType) CastToCAPIPowerVSProcessorType() ibmcapi.PowerVSProcessorType {
+ switch *p {
+ case PowerVSNodePoolDedicatedProcType:
+ return ibmcapi.PowerVSProcessorTypeDedicated
+ case PowerVSNodePoolCappedProcType:
+ return ibmcapi.PowerVSProcessorTypeCapped
+ default:
+ return ibmcapi.PowerVSProcessorTypeShared
+ }
+}
+
+// PowerVSNodePoolStorageType defines the storage type to be used for PowerVSNodePoolPlatform
+type PowerVSNodePoolStorageType string
+
+// PowerVSNodePoolImageDeletePolicy defines the image delete policy to be used for PowerVSNodePoolPlatform
+type PowerVSNodePoolImageDeletePolicy string
+
+// PowerVSNodePoolPlatform specifies the configuration of a NodePool when operating
+// on the IBMCloud PowerVS platform.
+type PowerVSNodePoolPlatform struct {
+ // SystemType is the System type used to host the instance.
+ // systemType determines the number of cores and the memory that are available.
+ // A few of the supported SystemTypes are s922, e880 and e980.
+ // The e880 systemType is available only in Dallas datacenters.
+ // The e980 systemType is available in datacenters other than Dallas and Washington.
+ // When omitted, this means that the user has no opinion and the platform is left to choose a
+ // reasonable default. The current default is s922 which is generally available.
+ //
+ // +optional
+ // +kubebuilder:default=s922
+ SystemType string `json:"systemType,omitempty"`
+
+ // ProcessorType is the VM instance processor type.
+ // It must be set to one of the following values: Dedicated, Capped or Shared.
+ //
+ // Dedicated: resources are allocated for a specific client; the hypervisor makes a 1:1 binding of a partition’s processor to a physical processor core.
+ // Shared: shared among other clients.
+ // Capped: shared, but resources do not expand beyond those that are requested; the amount of CPU time is capped to the value specified for the entitlement.
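+ //
+ // Illustrative example (a minimal sketch using only the helpers defined in
+ // this file): parsing a user-supplied value and mapping it to its CAPI
+ // equivalent:
+ //
+ //	var pt PowerVSNodePoolProcType
+ //	if err := pt.Set("dedicated"); err != nil {
+ //		// unknown processor type
+ //	}
+ //	capiType := pt.CastToCAPIPowerVSProcessorType()
+ //	_ = capiType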
+ //
+ // If the processorType is set to Dedicated, then the Processors value cannot be fractional.
+ // When omitted, this means that the user has no opinion and the platform is left to choose a
+ // reasonable default. The current default is shared.
+ //
+ // +kubebuilder:default=shared
+ // +kubebuilder:validation:Enum=dedicated;shared;capped
+ // +optional
+ ProcessorType PowerVSNodePoolProcType `json:"processorType,omitempty"`
+
+ // Processors is the number of virtual processors in a virtual machine.
+ // When the processorType is set to Dedicated, the processors value cannot be fractional.
+ // The maximum value for Processors depends on the selected SystemType:
+ // when SystemType is set to e880 or e980, the maximum Processors value is 143;
+ // when SystemType is set to s922, the maximum Processors value is 15.
+ // The minimum value for Processors depends on the selected ProcessorType:
+ // when ProcessorType is set to Shared or Capped, the minimum is 0.5;
+ // when ProcessorType is set to Dedicated, the minimum is 1.
+ // When omitted, this means that the user has no opinion and the platform is left to choose a
+ // reasonable default, based on the selected ProcessorType:
+ // when ProcessorType is Dedicated, the default is 1;
+ // when ProcessorType is Shared or Capped, the default is 0.5.
+ //
+ // +optional
+ // +kubebuilder:default="0.5"
+ Processors intstr.IntOrString `json:"processors,omitempty"`
+
+ // MemoryGiB is the size of a virtual machine's memory, in GiB.
+ // The maximum value for MemoryGiB depends on the selected SystemType:
+ // when SystemType is set to e880, the maximum MemoryGiB value is 7463 GiB;
+ // when SystemType is set to e980, the maximum MemoryGiB value is 15307 GiB;
+ // when SystemType is set to s922, the maximum MemoryGiB value is 942 GiB.
+ // The minimum memory is 32 GiB.
+ //
+ // When omitted, this means the user has no opinion and the platform is left to choose a reasonable
+ // default. The current default is 32.
+ //
+ // +optional
+ // +kubebuilder:default=32
+ MemoryGiB int32 `json:"memoryGiB,omitempty"`
+
+ // Image used for deploying the nodes. If unspecified, the default
+ // is chosen based on the NodePool release payload image.
+ //
+ // +optional
+ Image *PowerVSResourceReference `json:"image,omitempty"`
+
+ // StorageType for the image and nodes; this will be ignored if Image is specified.
+ // The storage tiers in PowerVS are based on I/O operations per second (IOPS).
+ // This means that the performance of your storage volumes is limited to the maximum number of IOPS based on volume size and storage tier.
+ // Although the exact numbers might change over time, Tier 3 storage is currently set to 3 IOPS/GB, and Tier 1 storage is currently set to 10 IOPS/GB.
+ //
+ // The default is tier1.
+ //
+ // +kubebuilder:default=tier1
+ // +kubebuilder:validation:Enum=tier1;tier3
+ // +optional
+ StorageType PowerVSNodePoolStorageType `json:"storageType,omitempty"`
+
+ // ImageDeletePolicy is the policy for image deletion.
+ //
+ // delete: delete the image from the infrastructure.
+ // retain: delete the image from OpenShift but retain it in the infrastructure.
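+ //
+ // Illustrative example (a minimal sketch; the values are placeholders within
+ // the documented limits): a small shared-processor pool with a fractional
+ // processor count expressed as a string:
+ //
+ //	pvs := PowerVSNodePoolPlatform{
+ //		SystemType:    "s922",
+ //		ProcessorType: PowerVSNodePoolSharedProcType,
+ //		Processors:    intstr.FromString("0.5"),
+ //		MemoryGiB:     32,
+ //	}
+ //	_ = pvs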
+ //
+ // The default is delete.
+ //
+ // +kubebuilder:default=delete
+ // +kubebuilder:validation:Enum=delete;retain
+ // +optional
+ ImageDeletePolicy PowerVSNodePoolImageDeletePolicy `json:"imageDeletePolicy,omitempty"`
+}
+
+// PowerVSPlatformSpec defines IBMCloud PowerVS specific settings for components
+type PowerVSPlatformSpec struct {
+ // AccountID is the IBMCloud account id.
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // +immutable
+ AccountID string `json:"accountID"`
+
+ // CISInstanceCRN is the IBMCloud CIS Service Instance's Cloud Resource Name.
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // +kubebuilder:validation:Pattern=`^crn:`
+ // +immutable
+ CISInstanceCRN string `json:"cisInstanceCRN"`
+
+ // ResourceGroup is the IBMCloud Resource Group in which the cluster resides.
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // +immutable
+ ResourceGroup string `json:"resourceGroup"`
+
+ // Region is the IBMCloud region in which the cluster resides. This configures the
+ // OCP control plane cloud integrations, and is used by NodePool to resolve
+ // the correct boot image for a given release.
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // +immutable
+ Region string `json:"region"`
+
+ // Zone is the availability zone where control plane cloud resources are
+ // created.
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // +immutable
+ Zone string `json:"zone"`
+
+ // Subnet is the subnet to use for control plane cloud resources.
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // +immutable
+ Subnet *PowerVSResourceReference `json:"subnet"`
+
+ // ServiceInstance is the reference to the Power VS service on which the server instance (VM) will be created.
+ // The Power VS service is a container for all Power VS instances at a specific geographic region.
+ // serviceInstance can be created via the IBM Cloud catalog or CLI.
+ // ServiceInstanceID is the unique identifier that can be obtained from the IBM Cloud UI or the IBM Cloud cli.
+ //
+ // More detail about Power VS service instances:
+ // https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server
+ //
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // +immutable
+ ServiceInstanceID string `json:"serviceInstanceID"`
+
+ // VPC specifies IBM Cloud PowerVS Load Balancing configuration for the control
+ // plane.
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // +immutable
+ VPC *PowerVSVPC `json:"vpc"`
+
+ // KubeCloudControllerCreds is a reference to a secret containing cloud
+ // credentials with permissions matching the cloud controller policy.
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // TODO(dan): document the "cloud controller policy"
+ //
+ // +immutable
+ KubeCloudControllerCreds corev1.LocalObjectReference `json:"kubeCloudControllerCreds"`
+
+ // NodePoolManagementCreds is a reference to a secret containing cloud
+ // credentials with permissions matching the node pool management policy.
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // TODO(dan): document the "node pool management policy"
+ //
+ // +immutable
+ NodePoolManagementCreds corev1.LocalObjectReference `json:"nodePoolManagementCreds"`
+
+ // IngressOperatorCloudCreds is a reference to a secret containing IBM Cloud
+ // credentials for the ingress operator to authenticate with IBM Cloud.
+ //
+ // +immutable
+ IngressOperatorCloudCreds corev1.LocalObjectReference `json:"ingressOperatorCloudCreds"`
+
+ // StorageOperatorCloudCreds is a reference to a secret containing IBM Cloud
+ // credentials for the storage operator to authenticate with IBM Cloud.
+ //
+ // +immutable
+ StorageOperatorCloudCreds corev1.LocalObjectReference `json:"storageOperatorCloudCreds"`
+
+ // ImageRegistryOperatorCloudCreds is a reference to a secret containing IBM Cloud
+ // credentials for the image registry operator to authenticate with IBM Cloud.
+ //
+ // +immutable
+ ImageRegistryOperatorCloudCreds corev1.LocalObjectReference `json:"imageRegistryOperatorCloudCreds"`
+}
+
+// PowerVSVPC specifies IBM Cloud PowerVS LoadBalancer configuration for the control
+// plane.
+type PowerVSVPC struct {
+ // Name is the name of the VPC to be used for all the service load balancers.
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // +immutable
+ Name string `json:"name"`
+
+ // Region is the IBMCloud region in which the VPC gets created; this VPC is used for all the ingress traffic
+ // into the OCP cluster.
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // +immutable
+ Region string `json:"region"`
+
+ // Zone is the availability zone where load balancer cloud resources are
+ // created.
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // +immutable
+ // +optional
+ Zone string `json:"zone,omitempty"`
+
+ // Subnet is the subnet to use for the load balancer.
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // +immutable
+ // +optional
+ Subnet string `json:"subnet,omitempty"`
+}
+
+// PowerVSResourceReference is a reference to a specific IBMCloud PowerVS resource by ID or Name.
+// Only one of ID or Name may be specified. Specifying more than one will result in
+// a validation error.
+type PowerVSResourceReference struct {
+ // ID of resource
+ // +optional
+ ID *string `json:"id,omitempty"`
+
+ // Name of resource
+ // +optional
+ Name *string `json:"name,omitempty"`
+}
diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..29628d778
--- /dev/null
+++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,3308 @@
+//go:build !ignore_autogenerated
+
+/*
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
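+//
+// Illustrative example (a minimal sketch): the generated helpers are typically
+// used to obtain an independent copy before mutating an object that may be
+// shared, e.g. one read from an informer cache:
+//
+//	copied := original.DeepCopy() // original is assumed to be a *AESCBCSpec
+//	copied.BackupKey = nil        // safe: the original is not touched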
+func (in *AESCBCSpec) DeepCopyInto(out *AESCBCSpec) { + *out = *in + out.ActiveKey = in.ActiveKey + if in.BackupKey != nil { + in, out := &in.BackupKey, &out.BackupKey + *out = new(corev1.LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AESCBCSpec. +func (in *AESCBCSpec) DeepCopy() *AESCBCSpec { + if in == nil { + return nil + } + out := new(AESCBCSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIEndpoint) DeepCopyInto(out *APIEndpoint) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIEndpoint. +func (in *APIEndpoint) DeepCopy() *APIEndpoint { + if in == nil { + return nil + } + out := new(APIEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerNetworking) DeepCopyInto(out *APIServerNetworking) { + *out = *in + if in.AdvertiseAddress != nil { + in, out := &in.AdvertiseAddress, &out.AdvertiseAddress + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + if in.AllowedCIDRBlocks != nil { + in, out := &in.AllowedCIDRBlocks, &out.AllowedCIDRBlocks + *out = make([]CIDRBlock, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerNetworking. +func (in *APIServerNetworking) DeepCopy() *APIServerNetworking { + if in == nil { + return nil + } + out := new(APIServerNetworking) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSCloudProviderConfig) DeepCopyInto(out *AWSCloudProviderConfig) { + *out = *in + if in.Subnet != nil { + in, out := &in.Subnet, &out.Subnet + *out = new(AWSResourceReference) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSCloudProviderConfig. +func (in *AWSCloudProviderConfig) DeepCopy() *AWSCloudProviderConfig { + if in == nil { + return nil + } + out := new(AWSCloudProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSEndpointService) DeepCopyInto(out *AWSEndpointService) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSEndpointService. +func (in *AWSEndpointService) DeepCopy() *AWSEndpointService { + if in == nil { + return nil + } + out := new(AWSEndpointService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSEndpointService) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AWSEndpointServiceList) DeepCopyInto(out *AWSEndpointServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AWSEndpointService, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSEndpointServiceList. +func (in *AWSEndpointServiceList) DeepCopy() *AWSEndpointServiceList { + if in == nil { + return nil + } + out := new(AWSEndpointServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSEndpointServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSEndpointServiceSpec) DeepCopyInto(out *AWSEndpointServiceSpec) { + *out = *in + if in.SubnetIDs != nil { + in, out := &in.SubnetIDs, &out.SubnetIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceTags != nil { + in, out := &in.ResourceTags, &out.ResourceTags + *out = make([]AWSResourceTag, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSEndpointServiceSpec. +func (in *AWSEndpointServiceSpec) DeepCopy() *AWSEndpointServiceSpec { + if in == nil { + return nil + } + out := new(AWSEndpointServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSEndpointServiceStatus) DeepCopyInto(out *AWSEndpointServiceStatus) { + *out = *in + if in.DNSNames != nil { + in, out := &in.DNSNames, &out.DNSNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSEndpointServiceStatus. +func (in *AWSEndpointServiceStatus) DeepCopy() *AWSEndpointServiceStatus { + if in == nil { + return nil + } + out := new(AWSEndpointServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSKMSAuthSpec) DeepCopyInto(out *AWSKMSAuthSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSKMSAuthSpec. +func (in *AWSKMSAuthSpec) DeepCopy() *AWSKMSAuthSpec { + if in == nil { + return nil + } + out := new(AWSKMSAuthSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSKMSKeyEntry) DeepCopyInto(out *AWSKMSKeyEntry) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSKMSKeyEntry. +func (in *AWSKMSKeyEntry) DeepCopy() *AWSKMSKeyEntry { + if in == nil { + return nil + } + out := new(AWSKMSKeyEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AWSKMSSpec) DeepCopyInto(out *AWSKMSSpec) {
+	*out = *in
+	out.ActiveKey = in.ActiveKey
+	if in.BackupKey != nil {
+		in, out := &in.BackupKey, &out.BackupKey
+		*out = new(AWSKMSKeyEntry)
+		**out = **in
+	}
+	out.Auth = in.Auth
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSKMSSpec.
+func (in *AWSKMSSpec) DeepCopy() *AWSKMSSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(AWSKMSSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSNodePoolPlatform) DeepCopyInto(out *AWSNodePoolPlatform) {
+	*out = *in
+	in.Subnet.DeepCopyInto(&out.Subnet)
+	if in.SecurityGroups != nil {
+		in, out := &in.SecurityGroups, &out.SecurityGroups
+		*out = make([]AWSResourceReference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RootVolume != nil {
+		in, out := &in.RootVolume, &out.RootVolume
+		*out = new(Volume)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ResourceTags != nil {
+		in, out := &in.ResourceTags, &out.ResourceTags
+		*out = make([]AWSResourceTag, len(*in))
+		copy(*out, *in)
+	}
+	if in.Placement != nil {
+		in, out := &in.Placement, &out.Placement
+		*out = new(PlacementOptions)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSNodePoolPlatform.
+func (in *AWSNodePoolPlatform) DeepCopy() *AWSNodePoolPlatform {
+	if in == nil {
+		return nil
+	}
+	out := new(AWSNodePoolPlatform)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSPlatformSpec) DeepCopyInto(out *AWSPlatformSpec) {
+	*out = *in
+	if in.CloudProviderConfig != nil {
+		in, out := &in.CloudProviderConfig, &out.CloudProviderConfig
+		*out = new(AWSCloudProviderConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ServiceEndpoints != nil {
+		in, out := &in.ServiceEndpoints, &out.ServiceEndpoints
+		*out = make([]AWSServiceEndpoint, len(*in))
+		copy(*out, *in)
+	}
+	out.RolesRef = in.RolesRef
+	if in.ResourceTags != nil {
+		in, out := &in.ResourceTags, &out.ResourceTags
+		*out = make([]AWSResourceTag, len(*in))
+		copy(*out, *in)
+	}
+	if in.AdditionalAllowedPrincipals != nil {
+		in, out := &in.AdditionalAllowedPrincipals, &out.AdditionalAllowedPrincipals
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SharedVPC != nil {
+		in, out := &in.SharedVPC, &out.SharedVPC
+		*out = new(AWSSharedVPC)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSPlatformSpec.
+func (in *AWSPlatformSpec) DeepCopy() *AWSPlatformSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(AWSPlatformSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSPlatformStatus) DeepCopyInto(out *AWSPlatformStatus) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSPlatformStatus.
+func (in *AWSPlatformStatus) DeepCopy() *AWSPlatformStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(AWSPlatformStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSResourceReference) DeepCopyInto(out *AWSResourceReference) {
+	*out = *in
+	if in.ID != nil {
+		in, out := &in.ID, &out.ID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Filters != nil {
+		in, out := &in.Filters, &out.Filters
+		*out = make([]Filter, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceReference.
+func (in *AWSResourceReference) DeepCopy() *AWSResourceReference {
+	if in == nil {
+		return nil
+	}
+	out := new(AWSResourceReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSResourceTag) DeepCopyInto(out *AWSResourceTag) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceTag.
+func (in *AWSResourceTag) DeepCopy() *AWSResourceTag {
+	if in == nil {
+		return nil
+	}
+	out := new(AWSResourceTag)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSRoleCredentials) DeepCopyInto(out *AWSRoleCredentials) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSRoleCredentials.
+func (in *AWSRoleCredentials) DeepCopy() *AWSRoleCredentials {
+	if in == nil {
+		return nil
+	}
+	out := new(AWSRoleCredentials)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSRolesRef) DeepCopyInto(out *AWSRolesRef) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSRolesRef.
+func (in *AWSRolesRef) DeepCopy() *AWSRolesRef {
+	if in == nil {
+		return nil
+	}
+	out := new(AWSRolesRef)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSServiceEndpoint) DeepCopyInto(out *AWSServiceEndpoint) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSServiceEndpoint.
+func (in *AWSServiceEndpoint) DeepCopy() *AWSServiceEndpoint {
+	if in == nil {
+		return nil
+	}
+	out := new(AWSServiceEndpoint)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSSharedVPC) DeepCopyInto(out *AWSSharedVPC) {
+	*out = *in
+	out.RolesRef = in.RolesRef
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSharedVPC.
+func (in *AWSSharedVPC) DeepCopy() *AWSSharedVPC {
+	if in == nil {
+		return nil
+	}
+	out := new(AWSSharedVPC)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSSharedVPCRolesRef) DeepCopyInto(out *AWSSharedVPCRolesRef) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSharedVPCRolesRef.
+func (in *AWSSharedVPCRolesRef) DeepCopy() *AWSSharedVPCRolesRef {
+	if in == nil {
+		return nil
+	}
+	out := new(AWSSharedVPCRolesRef)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AddressPair) DeepCopyInto(out *AddressPair) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressPair.
+func (in *AddressPair) DeepCopy() *AddressPair {
+	if in == nil {
+		return nil
+	}
+	out := new(AddressPair)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AgentNodePoolPlatform) DeepCopyInto(out *AgentNodePoolPlatform) {
+	*out = *in
+	if in.AgentLabelSelector != nil {
+		in, out := &in.AgentLabelSelector, &out.AgentLabelSelector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentNodePoolPlatform.
+func (in *AgentNodePoolPlatform) DeepCopy() *AgentNodePoolPlatform {
+	if in == nil {
+		return nil
+	}
+	out := new(AgentNodePoolPlatform)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AgentPlatformSpec) DeepCopyInto(out *AgentPlatformSpec) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentPlatformSpec.
+func (in *AgentPlatformSpec) DeepCopy() *AgentPlatformSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(AgentPlatformSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AllocationPool) DeepCopyInto(out *AllocationPool) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationPool.
+func (in *AllocationPool) DeepCopy() *AllocationPool {
+	if in == nil {
+		return nil
+	}
+	out := new(AllocationPool)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureKMSKey) DeepCopyInto(out *AzureKMSKey) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureKMSKey.
+func (in *AzureKMSKey) DeepCopy() *AzureKMSKey {
+	if in == nil {
+		return nil
+	}
+	out := new(AzureKMSKey)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureKMSSpec) DeepCopyInto(out *AzureKMSSpec) {
+	*out = *in
+	out.ActiveKey = in.ActiveKey
+	if in.BackupKey != nil {
+		in, out := &in.BackupKey, &out.BackupKey
+		*out = new(AzureKMSKey)
+		**out = **in
+	}
+	out.KMS = in.KMS
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureKMSSpec.
+func (in *AzureKMSSpec) DeepCopy() *AzureKMSSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(AzureKMSSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureMarketplaceImage) DeepCopyInto(out *AzureMarketplaceImage) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMarketplaceImage.
+func (in *AzureMarketplaceImage) DeepCopy() *AzureMarketplaceImage {
+	if in == nil {
+		return nil
+	}
+	out := new(AzureMarketplaceImage)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureNodePoolOSDisk) DeepCopyInto(out *AzureNodePoolOSDisk) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureNodePoolOSDisk.
+func (in *AzureNodePoolOSDisk) DeepCopy() *AzureNodePoolOSDisk {
+	if in == nil {
+		return nil
+	}
+	out := new(AzureNodePoolOSDisk)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureNodePoolPlatform) DeepCopyInto(out *AzureNodePoolPlatform) {
+	*out = *in
+	in.Image.DeepCopyInto(&out.Image)
+	out.OSDisk = in.OSDisk
+	if in.Diagnostics != nil {
+		in, out := &in.Diagnostics, &out.Diagnostics
+		*out = new(Diagnostics)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureNodePoolPlatform.
+func (in *AzureNodePoolPlatform) DeepCopy() *AzureNodePoolPlatform {
+	if in == nil {
+		return nil
+	}
+	out := new(AzureNodePoolPlatform)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzurePlatformSpec) DeepCopyInto(out *AzurePlatformSpec) {
+	*out = *in
+	out.Credentials = in.Credentials
+	out.ManagedIdentities = in.ManagedIdentities
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzurePlatformSpec.
+func (in *AzurePlatformSpec) DeepCopy() *AzurePlatformSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(AzurePlatformSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureResourceManagedIdentities) DeepCopyInto(out *AzureResourceManagedIdentities) {
+	*out = *in
+	out.ControlPlane = in.ControlPlane
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureResourceManagedIdentities.
+func (in *AzureResourceManagedIdentities) DeepCopy() *AzureResourceManagedIdentities {
+	if in == nil {
+		return nil
+	}
+	out := new(AzureResourceManagedIdentities)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureVMImage) DeepCopyInto(out *AzureVMImage) {
+	*out = *in
+	if in.ImageID != nil {
+		in, out := &in.ImageID, &out.ImageID
+		*out = new(string)
+		**out = **in
+	}
+	if in.AzureMarketplace != nil {
+		in, out := &in.AzureMarketplace, &out.AzureMarketplace
+		*out = new(AzureMarketplaceImage)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureVMImage.
+func (in *AzureVMImage) DeepCopy() *AzureVMImage {
+	if in == nil {
+		return nil
+	}
+	out := new(AzureVMImage)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CertificateSigningRequestApproval) DeepCopyInto(out *CertificateSigningRequestApproval) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	out.Spec = in.Spec
+	out.Status = in.Status
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestApproval.
+func (in *CertificateSigningRequestApproval) DeepCopy() *CertificateSigningRequestApproval {
+	if in == nil {
+		return nil
+	}
+	out := new(CertificateSigningRequestApproval)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CertificateSigningRequestApproval) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CertificateSigningRequestApprovalList) DeepCopyInto(out *CertificateSigningRequestApprovalList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]CertificateSigningRequestApproval, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestApprovalList.
+func (in *CertificateSigningRequestApprovalList) DeepCopy() *CertificateSigningRequestApprovalList {
+	if in == nil {
+		return nil
+	}
+	out := new(CertificateSigningRequestApprovalList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CertificateSigningRequestApprovalList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CertificateSigningRequestApprovalSpec) DeepCopyInto(out *CertificateSigningRequestApprovalSpec) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestApprovalSpec.
+func (in *CertificateSigningRequestApprovalSpec) DeepCopy() *CertificateSigningRequestApprovalSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(CertificateSigningRequestApprovalSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CertificateSigningRequestApprovalStatus) DeepCopyInto(out *CertificateSigningRequestApprovalStatus) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestApprovalStatus.
+func (in *CertificateSigningRequestApprovalStatus) DeepCopy() *CertificateSigningRequestApprovalStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(CertificateSigningRequestApprovalStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterAutoscaling) DeepCopyInto(out *ClusterAutoscaling) {
+	*out = *in
+	if in.MaxNodesTotal != nil {
+		in, out := &in.MaxNodesTotal, &out.MaxNodesTotal
+		*out = new(int32)
+		**out = **in
+	}
+	if in.MaxPodGracePeriod != nil {
+		in, out := &in.MaxPodGracePeriod, &out.MaxPodGracePeriod
+		*out = new(int32)
+		**out = **in
+	}
+	if in.PodPriorityThreshold != nil {
+		in, out := &in.PodPriorityThreshold, &out.PodPriorityThreshold
+		*out = new(int32)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAutoscaling.
+func (in *ClusterAutoscaling) DeepCopy() *ClusterAutoscaling {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterAutoscaling)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterConfiguration) DeepCopyInto(out *ClusterConfiguration) {
+	*out = *in
+	if in.APIServer != nil {
+		in, out := &in.APIServer, &out.APIServer
+		*out = new(configv1.APIServerSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Authentication != nil {
+		in, out := &in.Authentication, &out.Authentication
+		*out = new(configv1.AuthenticationSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FeatureGate != nil {
+		in, out := &in.FeatureGate, &out.FeatureGate
+		*out = new(configv1.FeatureGateSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Image != nil {
+		in, out := &in.Image, &out.Image
+		*out = new(configv1.ImageSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Ingress != nil {
+		in, out := &in.Ingress, &out.Ingress
+		*out = new(configv1.IngressSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Network != nil {
+		in, out := &in.Network, &out.Network
+		*out = new(configv1.NetworkSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.OAuth != nil {
+		in, out := &in.OAuth, &out.OAuth
+		*out = new(configv1.OAuthSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.OperatorHub != nil {
+		in, out := &in.OperatorHub, &out.OperatorHub
+		*out = new(configv1.OperatorHubSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Scheduler != nil {
+		in, out := &in.Scheduler, &out.Scheduler
+		*out = new(configv1.SchedulerSpec)
+		**out = **in
+	}
+	if in.Proxy != nil {
+		in, out := &in.Proxy, &out.Proxy
+		*out = new(configv1.ProxySpec)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfiguration.
+func (in *ClusterConfiguration) DeepCopy() *ClusterConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) {
+	*out = *in
+	in.CIDR.DeepCopyInto(&out.CIDR)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry.
+func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterNetworkEntry)
+	in.DeepCopyInto(out)
+	return out
+}
+
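Editor's note: ClusterConfiguration above shows the two pointer-copy forms the generator emits. A pointer to a struct whose fields are all values can be copied with a plain dereference (**out = **in); a struct that carries slices, maps, or nested pointers must recurse through its own DeepCopyInto. A self-contained sketch of the distinction, using hypothetical types (not from this patch):

	type leaf struct{ Name string }   // value fields only: *dst = *src is already deep
	type node struct{ Tags []string } // reference field: a plain copy would alias Tags

	func copyNode(in *node) *node {
		out := new(node)
		*out = *in // shallow: out.Tags still points at in.Tags
		if in.Tags != nil {
			out.Tags = make([]string, len(in.Tags))
			copy(out.Tags, in.Tags) // now independent, mirroring the generated slice branches
		}
		return out
	}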
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterNetworking) DeepCopyInto(out *ClusterNetworking) {
+	*out = *in
+	if in.MachineNetwork != nil {
+		in, out := &in.MachineNetwork, &out.MachineNetwork
+		*out = make([]MachineNetworkEntry, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ClusterNetwork != nil {
+		in, out := &in.ClusterNetwork, &out.ClusterNetwork
+		*out = make([]ClusterNetworkEntry, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ServiceNetwork != nil {
+		in, out := &in.ServiceNetwork, &out.ServiceNetwork
+		*out = make([]ServiceNetworkEntry, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.APIServer != nil {
+		in, out := &in.APIServer, &out.APIServer
+		*out = new(APIServerNetworking)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworking.
+func (in *ClusterNetworking) DeepCopy() *ClusterNetworking {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterNetworking)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterVersionStatus) DeepCopyInto(out *ClusterVersionStatus) {
+	*out = *in
+	in.Desired.DeepCopyInto(&out.Desired)
+	if in.History != nil {
+		in, out := &in.History, &out.History
+		*out = make([]configv1.UpdateHistory, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.AvailableUpdates != nil {
+		in, out := &in.AvailableUpdates, &out.AvailableUpdates
+		*out = make([]configv1.Release, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ConditionalUpdates != nil {
+		in, out := &in.ConditionalUpdates, &out.ConditionalUpdates
+		*out = make([]configv1.ConditionalUpdate, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionStatus.
+func (in *ClusterVersionStatus) DeepCopy() *ClusterVersionStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterVersionStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ComponentResource) DeepCopyInto(out *ComponentResource) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentResource.
+func (in *ComponentResource) DeepCopy() *ComponentResource {
+	if in == nil {
+		return nil
+	}
+	out := new(ComponentResource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControlPlaneComponent) DeepCopyInto(out *ControlPlaneComponent) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	out.Spec = in.Spec
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneComponent.
+func (in *ControlPlaneComponent) DeepCopy() *ControlPlaneComponent {
+	if in == nil {
+		return nil
+	}
+	out := new(ControlPlaneComponent)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControlPlaneComponent) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControlPlaneComponentList) DeepCopyInto(out *ControlPlaneComponentList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ControlPlaneComponent, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneComponentList.
+func (in *ControlPlaneComponentList) DeepCopy() *ControlPlaneComponentList {
+	if in == nil {
+		return nil
+	}
+	out := new(ControlPlaneComponentList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControlPlaneComponentList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControlPlaneComponentSpec) DeepCopyInto(out *ControlPlaneComponentSpec) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneComponentSpec.
+func (in *ControlPlaneComponentSpec) DeepCopy() *ControlPlaneComponentSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ControlPlaneComponentSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControlPlaneComponentStatus) DeepCopyInto(out *ControlPlaneComponentStatus) {
+	*out = *in
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]ComponentResource, len(*in))
+		copy(*out, *in)
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]v1.Condition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneComponentStatus.
+func (in *ControlPlaneComponentStatus) DeepCopy() *ControlPlaneComponentStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ControlPlaneComponentStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControlPlaneManagedIdentities) DeepCopyInto(out *ControlPlaneManagedIdentities) {
+	*out = *in
+	out.ManagedIdentitiesKeyVault = in.ManagedIdentitiesKeyVault
+	out.CloudProvider = in.CloudProvider
+	out.NodePoolManagement = in.NodePoolManagement
+	out.ControlPlaneOperator = in.ControlPlaneOperator
+	out.ImageRegistry = in.ImageRegistry
+	out.Ingress = in.Ingress
+	out.Network = in.Network
+	out.Disk = in.Disk
+	out.File = in.File
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneManagedIdentities.
+func (in *ControlPlaneManagedIdentities) DeepCopy() *ControlPlaneManagedIdentities {
+	if in == nil {
+		return nil
+	}
+	out := new(ControlPlaneManagedIdentities)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSSpec) DeepCopyInto(out *DNSSpec) {
+	*out = *in
+	if in.BaseDomainPrefix != nil {
+		in, out := &in.BaseDomainPrefix, &out.BaseDomainPrefix
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSSpec.
+func (in *DNSSpec) DeepCopy() *DNSSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(DNSSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Diagnostics) DeepCopyInto(out *Diagnostics) {
+	*out = *in
+	if in.UserManaged != nil {
+		in, out := &in.UserManaged, &out.UserManaged
+		*out = new(UserManagedDiagnostics)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Diagnostics.
+func (in *Diagnostics) DeepCopy() *Diagnostics {
+	if in == nil {
+		return nil
+	}
+	out := new(Diagnostics)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdSpec) DeepCopyInto(out *EtcdSpec) {
+	*out = *in
+	if in.Managed != nil {
+		in, out := &in.Managed, &out.Managed
+		*out = new(ManagedEtcdSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Unmanaged != nil {
+		in, out := &in.Unmanaged, &out.Unmanaged
+		*out = new(UnmanagedEtcdSpec)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdSpec.
+func (in *EtcdSpec) DeepCopy() *EtcdSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(EtcdSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdTLSConfig) DeepCopyInto(out *EtcdTLSConfig) {
+	*out = *in
+	out.ClientSecret = in.ClientSecret
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdTLSConfig.
+func (in *EtcdTLSConfig) DeepCopy() *EtcdTLSConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(EtcdTLSConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Filter) DeepCopyInto(out *Filter) {
+	*out = *in
+	if in.Values != nil {
+		in, out := &in.Values, &out.Values
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filter.
+func (in *Filter) DeepCopy() *Filter {
+	if in == nil {
+		return nil
+	}
+	out := new(Filter)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FilterByNeutronTags) DeepCopyInto(out *FilterByNeutronTags) {
+	*out = *in
+	if in.Tags != nil {
+		in, out := &in.Tags, &out.Tags
+		*out = make([]NeutronTag, len(*in))
+		copy(*out, *in)
+	}
+	if in.TagsAny != nil {
+		in, out := &in.TagsAny, &out.TagsAny
+		*out = make([]NeutronTag, len(*in))
+		copy(*out, *in)
+	}
+	if in.NotTags != nil {
+		in, out := &in.NotTags, &out.NotTags
+		*out = make([]NeutronTag, len(*in))
+		copy(*out, *in)
+	}
+	if in.NotTagsAny != nil {
+		in, out := &in.NotTagsAny, &out.NotTagsAny
+		*out = make([]NeutronTag, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterByNeutronTags.
+func (in *FilterByNeutronTags) DeepCopy() *FilterByNeutronTags {
+	if in == nil {
+		return nil
+	}
+	out := new(FilterByNeutronTags)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostedCluster) DeepCopyInto(out *HostedCluster) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedCluster.
+func (in *HostedCluster) DeepCopy() *HostedCluster {
+	if in == nil {
+		return nil
+	}
+	out := new(HostedCluster)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *HostedCluster) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostedClusterList) DeepCopyInto(out *HostedClusterList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]HostedCluster, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedClusterList.
+func (in *HostedClusterList) DeepCopy() *HostedClusterList {
+	if in == nil {
+		return nil
+	}
+	out := new(HostedClusterList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *HostedClusterList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostedClusterSpec) DeepCopyInto(out *HostedClusterSpec) {
+	*out = *in
+	out.Release = in.Release
+	if in.ControlPlaneRelease != nil {
+		in, out := &in.ControlPlaneRelease, &out.ControlPlaneRelease
+		*out = new(Release)
+		**out = **in
+	}
+	in.Platform.DeepCopyInto(&out.Platform)
+	in.DNS.DeepCopyInto(&out.DNS)
+	in.Networking.DeepCopyInto(&out.Networking)
+	in.Autoscaling.DeepCopyInto(&out.Autoscaling)
+	in.Etcd.DeepCopyInto(&out.Etcd)
+	if in.Services != nil {
+		in, out := &in.Services, &out.Services
+		*out = make([]ServicePublishingStrategyMapping, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	out.PullSecret = in.PullSecret
+	out.SSHKey = in.SSHKey
+	if in.ServiceAccountSigningKey != nil {
+		in, out := &in.ServiceAccountSigningKey, &out.ServiceAccountSigningKey
+		*out = new(corev1.LocalObjectReference)
+		**out = **in
+	}
+	if in.Configuration != nil {
+		in, out := &in.Configuration, &out.Configuration
+		*out = new(ClusterConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AuditWebhook != nil {
+		in, out := &in.AuditWebhook, &out.AuditWebhook
+		*out = new(corev1.LocalObjectReference)
+		**out = **in
+	}
+	if in.ImageContentSources != nil {
+		in, out := &in.ImageContentSources, &out.ImageContentSources
+		*out = make([]ImageContentSource, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.AdditionalTrustBundle != nil {
+		in, out := &in.AdditionalTrustBundle, &out.AdditionalTrustBundle
+		*out = new(corev1.LocalObjectReference)
+		**out = **in
+	}
+	if in.SecretEncryption != nil {
+		in, out := &in.SecretEncryption, &out.SecretEncryption
+		*out = new(SecretEncryptionSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PausedUntil != nil {
+		in, out := &in.PausedUntil, &out.PausedUntil
+		*out = new(string)
+		**out = **in
+	}
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Tolerations != nil {
+		in, out := &in.Tolerations, &out.Tolerations
+		*out = make([]corev1.Toleration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedClusterSpec.
+func (in *HostedClusterSpec) DeepCopy() *HostedClusterSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(HostedClusterSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
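Editor's note: the NodeSelector branch in HostedClusterSpec above is the map variant of the same rule; assigning a map copies only the header, so the generator rebuilds it entry by entry. The same logic in isolation (the keys and values here are illustrative):

	src := map[string]string{"node-role.kubernetes.io/worker": ""}
	dst := make(map[string]string, len(src))
	for k, v := range src {
		dst[k] = v
	}
	dst["extra"] = "label" // does not leak back into src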
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostedClusterStatus) DeepCopyInto(out *HostedClusterStatus) {
+	*out = *in
+	if in.Version != nil {
+		in, out := &in.Version, &out.Version
+		*out = new(ClusterVersionStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.KubeConfig != nil {
+		in, out := &in.KubeConfig, &out.KubeConfig
+		*out = new(corev1.LocalObjectReference)
+		**out = **in
+	}
+	if in.KubeadminPassword != nil {
+		in, out := &in.KubeadminPassword, &out.KubeadminPassword
+		*out = new(corev1.LocalObjectReference)
+		**out = **in
+	}
+	out.ControlPlaneEndpoint = in.ControlPlaneEndpoint
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]v1.Condition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Platform != nil {
+		in, out := &in.Platform, &out.Platform
+		*out = new(PlatformStatus)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedClusterStatus.
+func (in *HostedClusterStatus) DeepCopy() *HostedClusterStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(HostedClusterStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostedControlPlane) DeepCopyInto(out *HostedControlPlane) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedControlPlane.
+func (in *HostedControlPlane) DeepCopy() *HostedControlPlane {
+	if in == nil {
+		return nil
+	}
+	out := new(HostedControlPlane)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *HostedControlPlane) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostedControlPlaneList) DeepCopyInto(out *HostedControlPlaneList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]HostedControlPlane, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedControlPlaneList.
+func (in *HostedControlPlaneList) DeepCopy() *HostedControlPlaneList {
+	if in == nil {
+		return nil
+	}
+	out := new(HostedControlPlaneList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *HostedControlPlaneList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostedControlPlaneSpec) DeepCopyInto(out *HostedControlPlaneSpec) {
+	*out = *in
+	if in.ControlPlaneReleaseImage != nil {
+		in, out := &in.ControlPlaneReleaseImage, &out.ControlPlaneReleaseImage
+		*out = new(string)
+		**out = **in
+	}
+	out.PullSecret = in.PullSecret
+	in.Networking.DeepCopyInto(&out.Networking)
+	out.SSHKey = in.SSHKey
+	in.Platform.DeepCopyInto(&out.Platform)
+	in.DNS.DeepCopyInto(&out.DNS)
+	if in.ServiceAccountSigningKey != nil {
+		in, out := &in.ServiceAccountSigningKey, &out.ServiceAccountSigningKey
+		*out = new(corev1.LocalObjectReference)
+		**out = **in
+	}
+	if in.KubeConfig != nil {
+		in, out := &in.KubeConfig, &out.KubeConfig
+		*out = new(KubeconfigSecretRef)
+		**out = **in
+	}
+	if in.Services != nil {
+		in, out := &in.Services, &out.Services
+		*out = make([]ServicePublishingStrategyMapping, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.AuditWebhook != nil {
+		in, out := &in.AuditWebhook, &out.AuditWebhook
+		*out = new(corev1.LocalObjectReference)
+		**out = **in
+	}
+	in.Etcd.DeepCopyInto(&out.Etcd)
+	if in.Configuration != nil {
+		in, out := &in.Configuration, &out.Configuration
+		*out = new(ClusterConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ImageContentSources != nil {
+		in, out := &in.ImageContentSources, &out.ImageContentSources
+		*out = make([]ImageContentSource, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.AdditionalTrustBundle != nil {
+		in, out := &in.AdditionalTrustBundle, &out.AdditionalTrustBundle
+		*out = new(corev1.LocalObjectReference)
+		**out = **in
+	}
+	if in.SecretEncryption != nil {
+		in, out := &in.SecretEncryption, &out.SecretEncryption
+		*out = new(SecretEncryptionSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PausedUntil != nil {
+		in, out := &in.PausedUntil, &out.PausedUntil
+		*out = new(string)
+		**out = **in
+	}
+	in.Autoscaling.DeepCopyInto(&out.Autoscaling)
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Tolerations != nil {
+		in, out := &in.Tolerations, &out.Tolerations
+		*out = make([]corev1.Toleration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedControlPlaneSpec.
+func (in *HostedControlPlaneSpec) DeepCopy() *HostedControlPlaneSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(HostedControlPlaneSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostedControlPlaneStatus) DeepCopyInto(out *HostedControlPlaneStatus) {
+	*out = *in
+	if in.ExternalManagedControlPlane != nil {
+		in, out := &in.ExternalManagedControlPlane, &out.ExternalManagedControlPlane
+		*out = new(bool)
+		**out = **in
+	}
+	out.ControlPlaneEndpoint = in.ControlPlaneEndpoint
+	if in.VersionStatus != nil {
+		in, out := &in.VersionStatus, &out.VersionStatus
+		*out = new(ClusterVersionStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.LastReleaseImageTransitionTime != nil {
+		in, out := &in.LastReleaseImageTransitionTime, &out.LastReleaseImageTransitionTime
+		*out = (*in).DeepCopy()
+	}
+	if in.KubeConfig != nil {
+		in, out := &in.KubeConfig, &out.KubeConfig
+		*out = new(KubeconfigSecretRef)
+		**out = **in
+	}
+	if in.KubeadminPassword != nil {
+		in, out := &in.KubeadminPassword, &out.KubeadminPassword
+		*out = new(corev1.LocalObjectReference)
+		**out = **in
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]v1.Condition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Platform != nil {
+		in, out := &in.Platform, &out.Platform
+		*out = new(PlatformStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NodeCount != nil {
+		in, out := &in.NodeCount, &out.NodeCount
+		*out = new(int)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedControlPlaneStatus.
+func (in *HostedControlPlaneStatus) DeepCopy() *HostedControlPlaneStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(HostedControlPlaneStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IBMCloudKMSAuthSpec) DeepCopyInto(out *IBMCloudKMSAuthSpec) {
+	*out = *in
+	if in.Unmanaged != nil {
+		in, out := &in.Unmanaged, &out.Unmanaged
+		*out = new(IBMCloudKMSUnmanagedAuthSpec)
+		**out = **in
+	}
+	if in.Managed != nil {
+		in, out := &in.Managed, &out.Managed
+		*out = new(IBMCloudKMSManagedAuthSpec)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudKMSAuthSpec.
+func (in *IBMCloudKMSAuthSpec) DeepCopy() *IBMCloudKMSAuthSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(IBMCloudKMSAuthSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IBMCloudKMSKeyEntry) DeepCopyInto(out *IBMCloudKMSKeyEntry) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudKMSKeyEntry.
+func (in *IBMCloudKMSKeyEntry) DeepCopy() *IBMCloudKMSKeyEntry {
+	if in == nil {
+		return nil
+	}
+	out := new(IBMCloudKMSKeyEntry)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IBMCloudKMSManagedAuthSpec) DeepCopyInto(out *IBMCloudKMSManagedAuthSpec) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudKMSManagedAuthSpec.
+func (in *IBMCloudKMSManagedAuthSpec) DeepCopy() *IBMCloudKMSManagedAuthSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(IBMCloudKMSManagedAuthSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IBMCloudKMSSpec) DeepCopyInto(out *IBMCloudKMSSpec) {
+	*out = *in
+	in.Auth.DeepCopyInto(&out.Auth)
+	if in.KeyList != nil {
+		in, out := &in.KeyList, &out.KeyList
+		*out = make([]IBMCloudKMSKeyEntry, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudKMSSpec.
+func (in *IBMCloudKMSSpec) DeepCopy() *IBMCloudKMSSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(IBMCloudKMSSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IBMCloudKMSUnmanagedAuthSpec) DeepCopyInto(out *IBMCloudKMSUnmanagedAuthSpec) {
+	*out = *in
+	out.Credentials = in.Credentials
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudKMSUnmanagedAuthSpec.
+func (in *IBMCloudKMSUnmanagedAuthSpec) DeepCopy() *IBMCloudKMSUnmanagedAuthSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(IBMCloudKMSUnmanagedAuthSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IBMCloudPlatformSpec) DeepCopyInto(out *IBMCloudPlatformSpec) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudPlatformSpec.
+func (in *IBMCloudPlatformSpec) DeepCopy() *IBMCloudPlatformSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(IBMCloudPlatformSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageContentSource) DeepCopyInto(out *ImageContentSource) {
+	*out = *in
+	if in.Mirrors != nil {
+		in, out := &in.Mirrors, &out.Mirrors
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentSource.
+func (in *ImageContentSource) DeepCopy() *ImageContentSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageContentSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InPlaceUpgrade) DeepCopyInto(out *InPlaceUpgrade) {
+	*out = *in
+	if in.MaxUnavailable != nil {
+		in, out := &in.MaxUnavailable, &out.MaxUnavailable
+		*out = new(intstr.IntOrString)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InPlaceUpgrade.
+func (in *InPlaceUpgrade) DeepCopy() *InPlaceUpgrade {
+	if in == nil {
+		return nil
+	}
+	out := new(InPlaceUpgrade)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KMSSpec) DeepCopyInto(out *KMSSpec) {
+	*out = *in
+	if in.IBMCloud != nil {
+		in, out := &in.IBMCloud, &out.IBMCloud
+		*out = new(IBMCloudKMSSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AWS != nil {
+		in, out := &in.AWS, &out.AWS
+		*out = new(AWSKMSSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Azure != nil {
+		in, out := &in.Azure, &out.Azure
+		*out = new(AzureKMSSpec)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSSpec.
+func (in *KMSSpec) DeepCopy() *KMSSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(KMSSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeVirtNodePoolStatus) DeepCopyInto(out *KubeVirtNodePoolStatus) {
+	*out = *in
+	if in.Credentials != nil {
+		in, out := &in.Credentials, &out.Credentials
+		*out = new(KubevirtPlatformCredentials)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeVirtNodePoolStatus.
+func (in *KubeVirtNodePoolStatus) DeepCopy() *KubeVirtNodePoolStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(KubeVirtNodePoolStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeconfigSecretRef) DeepCopyInto(out *KubeconfigSecretRef) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeconfigSecretRef.
+func (in *KubeconfigSecretRef) DeepCopy() *KubeconfigSecretRef {
+	if in == nil {
+		return nil
+	}
+	out := new(KubeconfigSecretRef)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubevirtCachingStrategy) DeepCopyInto(out *KubevirtCachingStrategy) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtCachingStrategy.
+func (in *KubevirtCachingStrategy) DeepCopy() *KubevirtCachingStrategy {
+	if in == nil {
+		return nil
+	}
+	out := new(KubevirtCachingStrategy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubevirtCompute) DeepCopyInto(out *KubevirtCompute) {
+	*out = *in
+	if in.Memory != nil {
+		in, out := &in.Memory, &out.Memory
+		x := (*in).DeepCopy()
+		*out = &x
+	}
+	if in.Cores != nil {
+		in, out := &in.Cores, &out.Cores
+		*out = new(uint32)
+		**out = **in
+	}
+	if in.QosClass != nil {
+		in, out := &in.QosClass, &out.QosClass
+		*out = new(QoSClass)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtCompute.
+func (in *KubevirtCompute) DeepCopy() *KubevirtCompute {
+	if in == nil {
+		return nil
+	}
+	out := new(KubevirtCompute)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubevirtDiskImage) DeepCopyInto(out *KubevirtDiskImage) {
+	*out = *in
+	if in.ContainerDiskImage != nil {
+		in, out := &in.ContainerDiskImage, &out.ContainerDiskImage
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtDiskImage.
+func (in *KubevirtDiskImage) DeepCopy() *KubevirtDiskImage {
+	if in == nil {
+		return nil
+	}
+	out := new(KubevirtDiskImage)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubevirtHostDevice) DeepCopyInto(out *KubevirtHostDevice) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtHostDevice.
+func (in *KubevirtHostDevice) DeepCopy() *KubevirtHostDevice {
+	if in == nil {
+		return nil
+	}
+	out := new(KubevirtHostDevice)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubevirtManualStorageDriverConfig) DeepCopyInto(out *KubevirtManualStorageDriverConfig) {
+	*out = *in
+	if in.StorageClassMapping != nil {
+		in, out := &in.StorageClassMapping, &out.StorageClassMapping
+		*out = make([]KubevirtStorageClassMapping, len(*in))
+		copy(*out, *in)
+	}
+	if in.VolumeSnapshotClassMapping != nil {
+		in, out := &in.VolumeSnapshotClassMapping, &out.VolumeSnapshotClassMapping
+		*out = make([]KubevirtVolumeSnapshotClassMapping, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtManualStorageDriverConfig.
+func (in *KubevirtManualStorageDriverConfig) DeepCopy() *KubevirtManualStorageDriverConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(KubevirtManualStorageDriverConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubevirtNetwork) DeepCopyInto(out *KubevirtNetwork) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtNetwork.
+func (in *KubevirtNetwork) DeepCopy() *KubevirtNetwork {
+	if in == nil {
+		return nil
+	}
+	out := new(KubevirtNetwork)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubevirtNodePoolPlatform) DeepCopyInto(out *KubevirtNodePoolPlatform) {
+	*out = *in
+	if in.RootVolume != nil {
+		in, out := &in.RootVolume, &out.RootVolume
+		*out = new(KubevirtRootVolume)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Compute != nil {
+		in, out := &in.Compute, &out.Compute
+		*out = new(KubevirtCompute)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NetworkInterfaceMultiQueue != nil {
+		in, out := &in.NetworkInterfaceMultiQueue, &out.NetworkInterfaceMultiQueue
+		*out = new(MultiQueueSetting)
+		**out = **in
+	}
+	if in.AdditionalNetworks != nil {
+		in, out := &in.AdditionalNetworks, &out.AdditionalNetworks
+		*out = make([]KubevirtNetwork, len(*in))
+		copy(*out, *in)
+	}
+	if in.AttachDefaultNetwork != nil {
+		in, out := &in.AttachDefaultNetwork, &out.AttachDefaultNetwork
+		*out = new(bool)
+		**out = **in
+	}
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.KubevirtHostDevices != nil {
+		in, out := &in.KubevirtHostDevices, &out.KubevirtHostDevices
+		*out = make([]KubevirtHostDevice, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtNodePoolPlatform.
+func (in *KubevirtNodePoolPlatform) DeepCopy() *KubevirtNodePoolPlatform {
+	if in == nil {
+		return nil
+	}
+	out := new(KubevirtNodePoolPlatform)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubevirtPersistentVolume) DeepCopyInto(out *KubevirtPersistentVolume) {
+	*out = *in
+	if in.Size != nil {
+		in, out := &in.Size, &out.Size
+		x := (*in).DeepCopy()
+		*out = &x
+	}
+	if in.StorageClass != nil {
+		in, out := &in.StorageClass, &out.StorageClass
+		*out = new(string)
+		**out = **in
+	}
+	if in.AccessModes != nil {
+		in, out := &in.AccessModes, &out.AccessModes
+		*out = make([]PersistentVolumeAccessMode, len(*in))
+		copy(*out, *in)
+	}
+	if in.VolumeMode != nil {
+		in, out := &in.VolumeMode, &out.VolumeMode
+		*out = new(corev1.PersistentVolumeMode)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtPersistentVolume.
+func (in *KubevirtPersistentVolume) DeepCopy() *KubevirtPersistentVolume {
+	if in == nil {
+		return nil
+	}
+	out := new(KubevirtPersistentVolume)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubevirtPlatformCredentials) DeepCopyInto(out *KubevirtPlatformCredentials) {
+	*out = *in
+	if in.InfraKubeConfigSecret != nil {
+		in, out := &in.InfraKubeConfigSecret, &out.InfraKubeConfigSecret
+		*out = new(KubeconfigSecretRef)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtPlatformCredentials.
+func (in *KubevirtPlatformCredentials) DeepCopy() *KubevirtPlatformCredentials {
+	if in == nil {
+		return nil
+	}
+	out := new(KubevirtPlatformCredentials)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubevirtPlatformSpec) DeepCopyInto(out *KubevirtPlatformSpec) {
+	*out = *in
+	if in.BaseDomainPassthrough != nil {
+		in, out := &in.BaseDomainPassthrough, &out.BaseDomainPassthrough
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Credentials != nil {
+		in, out := &in.Credentials, &out.Credentials
+		*out = new(KubevirtPlatformCredentials)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.StorageDriver != nil {
+		in, out := &in.StorageDriver, &out.StorageDriver
+		*out = new(KubevirtStorageDriverSpec)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtPlatformSpec.
+func (in *KubevirtPlatformSpec) DeepCopy() *KubevirtPlatformSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(KubevirtPlatformSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubevirtRootVolume) DeepCopyInto(out *KubevirtRootVolume) {
+	*out = *in
+	if in.Image != nil {
+		in, out := &in.Image, &out.Image
+		*out = new(KubevirtDiskImage)
+		(*in).DeepCopyInto(*out)
+	}
+	in.KubevirtVolume.DeepCopyInto(&out.KubevirtVolume)
+	if in.CacheStrategy != nil {
+		in, out := &in.CacheStrategy, &out.CacheStrategy
+		*out = new(KubevirtCachingStrategy)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtRootVolume.
+func (in *KubevirtRootVolume) DeepCopy() *KubevirtRootVolume {
+	if in == nil {
+		return nil
+	}
+	out := new(KubevirtRootVolume)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubevirtStorageClassMapping) DeepCopyInto(out *KubevirtStorageClassMapping) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtStorageClassMapping. +func (in *KubevirtStorageClassMapping) DeepCopy() *KubevirtStorageClassMapping { + if in == nil { + return nil + } + out := new(KubevirtStorageClassMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubevirtStorageDriverSpec) DeepCopyInto(out *KubevirtStorageDriverSpec) { + *out = *in + if in.Manual != nil { + in, out := &in.Manual, &out.Manual + *out = new(KubevirtManualStorageDriverConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtStorageDriverSpec. +func (in *KubevirtStorageDriverSpec) DeepCopy() *KubevirtStorageDriverSpec { + if in == nil { + return nil + } + out := new(KubevirtStorageDriverSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubevirtVolume) DeepCopyInto(out *KubevirtVolume) { + *out = *in + if in.Persistent != nil { + in, out := &in.Persistent, &out.Persistent + *out = new(KubevirtPersistentVolume) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtVolume. +func (in *KubevirtVolume) DeepCopy() *KubevirtVolume { + if in == nil { + return nil + } + out := new(KubevirtVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubevirtVolumeSnapshotClassMapping) DeepCopyInto(out *KubevirtVolumeSnapshotClassMapping) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtVolumeSnapshotClassMapping. +func (in *KubevirtVolumeSnapshotClassMapping) DeepCopy() *KubevirtVolumeSnapshotClassMapping { + if in == nil { + return nil + } + out := new(KubevirtVolumeSnapshotClassMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancerPublishingStrategy) DeepCopyInto(out *LoadBalancerPublishingStrategy) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerPublishingStrategy. +func (in *LoadBalancerPublishingStrategy) DeepCopy() *LoadBalancerPublishingStrategy { + if in == nil { + return nil + } + out := new(LoadBalancerPublishingStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineNetworkEntry) DeepCopyInto(out *MachineNetworkEntry) { + *out = *in + in.CIDR.DeepCopyInto(&out.CIDR) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineNetworkEntry. +func (in *MachineNetworkEntry) DeepCopy() *MachineNetworkEntry { + if in == nil { + return nil + } + out := new(MachineNetworkEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
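+// Types such as ManagedAzureKeyVault below contain only value fields, so the
+// generated copy reduces to a single struct assignment (*out = *in).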
+func (in *ManagedAzureKeyVault) DeepCopyInto(out *ManagedAzureKeyVault) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedAzureKeyVault. +func (in *ManagedAzureKeyVault) DeepCopy() *ManagedAzureKeyVault { + if in == nil { + return nil + } + out := new(ManagedAzureKeyVault) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedEtcdSpec) DeepCopyInto(out *ManagedEtcdSpec) { + *out = *in + in.Storage.DeepCopyInto(&out.Storage) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedEtcdSpec. +func (in *ManagedEtcdSpec) DeepCopy() *ManagedEtcdSpec { + if in == nil { + return nil + } + out := new(ManagedEtcdSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedEtcdStorageSpec) DeepCopyInto(out *ManagedEtcdStorageSpec) { + *out = *in + if in.PersistentVolume != nil { + in, out := &in.PersistentVolume, &out.PersistentVolume + *out = new(PersistentVolumeEtcdStorageSpec) + (*in).DeepCopyInto(*out) + } + if in.RestoreSnapshotURL != nil { + in, out := &in.RestoreSnapshotURL, &out.RestoreSnapshotURL + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedEtcdStorageSpec. +func (in *ManagedEtcdStorageSpec) DeepCopy() *ManagedEtcdStorageSpec { + if in == nil { + return nil + } + out := new(ManagedEtcdStorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedIdentity) DeepCopyInto(out *ManagedIdentity) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedIdentity. +func (in *ManagedIdentity) DeepCopy() *ManagedIdentity { + if in == nil { + return nil + } + out := new(ManagedIdentity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkFilter) DeepCopyInto(out *NetworkFilter) { + *out = *in + in.FilterByNeutronTags.DeepCopyInto(&out.FilterByNeutronTags) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkFilter. +func (in *NetworkFilter) DeepCopy() *NetworkFilter { + if in == nil { + return nil + } + out := new(NetworkFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkParam) DeepCopyInto(out *NetworkParam) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(NetworkFilter) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkParam. +func (in *NetworkParam) DeepCopy() *NetworkParam { + if in == nil { + return nil + } + out := new(NetworkParam) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
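+// NodePool is a top-level API object; together with the DeepCopyObject method
+// generated below, these methods satisfy runtime.Object, which is what allows
+// client-go informers and listers to hand out independent copies.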
+func (in *NodePool) DeepCopyInto(out *NodePool) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePool. +func (in *NodePool) DeepCopy() *NodePool { + if in == nil { + return nil + } + out := new(NodePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodePool) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePoolAutoScaling) DeepCopyInto(out *NodePoolAutoScaling) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolAutoScaling. +func (in *NodePoolAutoScaling) DeepCopy() *NodePoolAutoScaling { + if in == nil { + return nil + } + out := new(NodePoolAutoScaling) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePoolCondition) DeepCopyInto(out *NodePoolCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolCondition. +func (in *NodePoolCondition) DeepCopy() *NodePoolCondition { + if in == nil { + return nil + } + out := new(NodePoolCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePoolList) DeepCopyInto(out *NodePoolList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NodePool, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolList. +func (in *NodePoolList) DeepCopy() *NodePoolList { + if in == nil { + return nil + } + out := new(NodePoolList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodePoolList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePoolManagement) DeepCopyInto(out *NodePoolManagement) { + *out = *in + if in.Replace != nil { + in, out := &in.Replace, &out.Replace + *out = new(ReplaceUpgrade) + (*in).DeepCopyInto(*out) + } + if in.InPlace != nil { + in, out := &in.InPlace, &out.InPlace + *out = new(InPlaceUpgrade) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolManagement. +func (in *NodePoolManagement) DeepCopy() *NodePoolManagement { + if in == nil { + return nil + } + out := new(NodePoolManagement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodePoolPlatform) DeepCopyInto(out *NodePoolPlatform) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSNodePoolPlatform) + (*in).DeepCopyInto(*out) + } + if in.IBMCloud != nil { + in, out := &in.IBMCloud, &out.IBMCloud + *out = new(IBMCloudPlatformSpec) + **out = **in + } + if in.Kubevirt != nil { + in, out := &in.Kubevirt, &out.Kubevirt + *out = new(KubevirtNodePoolPlatform) + (*in).DeepCopyInto(*out) + } + if in.Agent != nil { + in, out := &in.Agent, &out.Agent + *out = new(AgentNodePoolPlatform) + (*in).DeepCopyInto(*out) + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(AzureNodePoolPlatform) + (*in).DeepCopyInto(*out) + } + if in.PowerVS != nil { + in, out := &in.PowerVS, &out.PowerVS + *out = new(PowerVSNodePoolPlatform) + (*in).DeepCopyInto(*out) + } + if in.OpenStack != nil { + in, out := &in.OpenStack, &out.OpenStack + *out = new(OpenStackNodePoolPlatform) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolPlatform. +func (in *NodePoolPlatform) DeepCopy() *NodePoolPlatform { + if in == nil { + return nil + } + out := new(NodePoolPlatform) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePoolPlatformStatus) DeepCopyInto(out *NodePoolPlatformStatus) { + *out = *in + if in.KubeVirt != nil { + in, out := &in.KubeVirt, &out.KubeVirt + *out = new(KubeVirtNodePoolStatus) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolPlatformStatus. +func (in *NodePoolPlatformStatus) DeepCopy() *NodePoolPlatformStatus { + if in == nil { + return nil + } + out := new(NodePoolPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePoolSpec) DeepCopyInto(out *NodePoolSpec) { + *out = *in + out.Release = in.Release + in.Platform.DeepCopyInto(&out.Platform) + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + in.Management.DeepCopyInto(&out.Management) + if in.AutoScaling != nil { + in, out := &in.AutoScaling, &out.AutoScaling + *out = new(NodePoolAutoScaling) + **out = **in + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]corev1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.NodeDrainTimeout != nil { + in, out := &in.NodeDrainTimeout, &out.NodeDrainTimeout + *out = new(v1.Duration) + **out = **in + } + if in.NodeVolumeDetachTimeout != nil { + in, out := &in.NodeVolumeDetachTimeout, &out.NodeVolumeDetachTimeout + *out = new(v1.Duration) + **out = **in + } + if in.NodeLabels != nil { + in, out := &in.NodeLabels, &out.NodeLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]Taint, len(*in)) + copy(*out, *in) + } + if in.PausedUntil != nil { + in, out := &in.PausedUntil, &out.PausedUntil + *out = new(string) + **out = **in + } + if in.TuningConfig != nil { + in, out := &in.TuningConfig, &out.TuningConfig + *out = make([]corev1.LocalObjectReference, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolSpec. 
+func (in *NodePoolSpec) DeepCopy() *NodePoolSpec { + if in == nil { + return nil + } + out := new(NodePoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePoolStatus) DeepCopyInto(out *NodePoolStatus) { + *out = *in + if in.Platform != nil { + in, out := &in.Platform, &out.Platform + *out = new(NodePoolPlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]NodePoolCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolStatus. +func (in *NodePoolStatus) DeepCopy() *NodePoolStatus { + if in == nil { + return nil + } + out := new(NodePoolStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePortPublishingStrategy) DeepCopyInto(out *NodePortPublishingStrategy) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePortPublishingStrategy. +func (in *NodePortPublishingStrategy) DeepCopy() *NodePortPublishingStrategy { + if in == nil { + return nil + } + out := new(NodePortPublishingStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackIdentityReference) DeepCopyInto(out *OpenStackIdentityReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackIdentityReference. +func (in *OpenStackIdentityReference) DeepCopy() *OpenStackIdentityReference { + if in == nil { + return nil + } + out := new(OpenStackIdentityReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackNodePoolPlatform) DeepCopyInto(out *OpenStackNodePoolPlatform) { + *out = *in + if in.AdditionalPorts != nil { + in, out := &in.AdditionalPorts, &out.AdditionalPorts + *out = make([]PortSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackNodePoolPlatform. +func (in *OpenStackNodePoolPlatform) DeepCopy() *OpenStackNodePoolPlatform { + if in == nil { + return nil + } + out := new(OpenStackNodePoolPlatform) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenStackPlatformSpec) DeepCopyInto(out *OpenStackPlatformSpec) { + *out = *in + out.IdentityRef = in.IdentityRef + if in.ManagedSubnets != nil { + in, out := &in.ManagedSubnets, &out.ManagedSubnets + *out = make([]SubnetSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Router != nil { + in, out := &in.Router, &out.Router + *out = new(RouterParam) + (*in).DeepCopyInto(*out) + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(NetworkParam) + (*in).DeepCopyInto(*out) + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]SubnetParam, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkMTU != nil { + in, out := &in.NetworkMTU, &out.NetworkMTU + *out = new(int) + **out = **in + } + if in.ExternalNetwork != nil { + in, out := &in.ExternalNetwork, &out.ExternalNetwork + *out = new(NetworkParam) + (*in).DeepCopyInto(*out) + } + if in.DisableExternalNetwork != nil { + in, out := &in.DisableExternalNetwork, &out.DisableExternalNetwork + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackPlatformSpec. +func (in *OpenStackPlatformSpec) DeepCopy() *OpenStackPlatformSpec { + if in == nil { + return nil + } + out := new(OpenStackPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeEtcdStorageSpec) DeepCopyInto(out *PersistentVolumeEtcdStorageSpec) { + *out = *in + if in.StorageClassName != nil { + in, out := &in.StorageClassName, &out.StorageClassName + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + x := (*in).DeepCopy() + *out = &x + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeEtcdStorageSpec. +func (in *PersistentVolumeEtcdStorageSpec) DeepCopy() *PersistentVolumeEtcdStorageSpec { + if in == nil { + return nil + } + out := new(PersistentVolumeEtcdStorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementOptions) DeepCopyInto(out *PlacementOptions) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementOptions. +func (in *PlacementOptions) DeepCopy() *PlacementOptions { + if in == nil { + return nil + } + out := new(PlacementOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
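+// PlatformSpec is a union of optional per-provider pointers; only the non-nil
+// branch for the configured platform is allocated and copied.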
+func (in *PlatformSpec) DeepCopyInto(out *PlatformSpec) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSPlatformSpec) + (*in).DeepCopyInto(*out) + } + if in.Agent != nil { + in, out := &in.Agent, &out.Agent + *out = new(AgentPlatformSpec) + **out = **in + } + if in.IBMCloud != nil { + in, out := &in.IBMCloud, &out.IBMCloud + *out = new(IBMCloudPlatformSpec) + **out = **in + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(AzurePlatformSpec) + **out = **in + } + if in.PowerVS != nil { + in, out := &in.PowerVS, &out.PowerVS + *out = new(PowerVSPlatformSpec) + (*in).DeepCopyInto(*out) + } + if in.Kubevirt != nil { + in, out := &in.Kubevirt, &out.Kubevirt + *out = new(KubevirtPlatformSpec) + (*in).DeepCopyInto(*out) + } + if in.OpenStack != nil { + in, out := &in.OpenStack, &out.OpenStack + *out = new(OpenStackPlatformSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformSpec. +func (in *PlatformSpec) DeepCopy() *PlatformSpec { + if in == nil { + return nil + } + out := new(PlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlatformStatus) DeepCopyInto(out *PlatformStatus) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSPlatformStatus) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformStatus. +func (in *PlatformStatus) DeepCopy() *PlatformStatus { + if in == nil { + return nil + } + out := new(PlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortSpec) DeepCopyInto(out *PortSpec) { + *out = *in + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(NetworkParam) + (*in).DeepCopyInto(*out) + } + if in.AllowedAddressPairs != nil { + in, out := &in.AllowedAddressPairs, &out.AllowedAddressPairs + *out = make([]AddressPair, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortSpec. +func (in *PortSpec) DeepCopy() *PortSpec { + if in == nil { + return nil + } + out := new(PortSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PowerVSNodePoolPlatform) DeepCopyInto(out *PowerVSNodePoolPlatform) { + *out = *in + out.Processors = in.Processors + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(PowerVSResourceReference) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSNodePoolPlatform. +func (in *PowerVSNodePoolPlatform) DeepCopy() *PowerVSNodePoolPlatform { + if in == nil { + return nil + } + out := new(PowerVSNodePoolPlatform) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PowerVSPlatformSpec) DeepCopyInto(out *PowerVSPlatformSpec) { + *out = *in + if in.Subnet != nil { + in, out := &in.Subnet, &out.Subnet + *out = new(PowerVSResourceReference) + (*in).DeepCopyInto(*out) + } + if in.VPC != nil { + in, out := &in.VPC, &out.VPC + *out = new(PowerVSVPC) + **out = **in + } + out.KubeCloudControllerCreds = in.KubeCloudControllerCreds + out.NodePoolManagementCreds = in.NodePoolManagementCreds + out.IngressOperatorCloudCreds = in.IngressOperatorCloudCreds + out.StorageOperatorCloudCreds = in.StorageOperatorCloudCreds + out.ImageRegistryOperatorCloudCreds = in.ImageRegistryOperatorCloudCreds +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSPlatformSpec. +func (in *PowerVSPlatformSpec) DeepCopy() *PowerVSPlatformSpec { + if in == nil { + return nil + } + out := new(PowerVSPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PowerVSResourceReference) DeepCopyInto(out *PowerVSResourceReference) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSResourceReference. +func (in *PowerVSResourceReference) DeepCopy() *PowerVSResourceReference { + if in == nil { + return nil + } + out := new(PowerVSResourceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PowerVSVPC) DeepCopyInto(out *PowerVSVPC) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSVPC. +func (in *PowerVSVPC) DeepCopy() *PowerVSVPC { + if in == nil { + return nil + } + out := new(PowerVSVPC) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Release) DeepCopyInto(out *Release) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Release. +func (in *Release) DeepCopy() *Release { + if in == nil { + return nil + } + out := new(Release) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplaceUpgrade) DeepCopyInto(out *ReplaceUpgrade) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdate) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplaceUpgrade. +func (in *ReplaceUpgrade) DeepCopy() *ReplaceUpgrade { + if in == nil { + return nil + } + out := new(ReplaceUpgrade) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RollingUpdate) DeepCopyInto(out *RollingUpdate) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(intstr.IntOrString) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdate. +func (in *RollingUpdate) DeepCopy() *RollingUpdate { + if in == nil { + return nil + } + out := new(RollingUpdate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutePublishingStrategy) DeepCopyInto(out *RoutePublishingStrategy) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutePublishingStrategy. +func (in *RoutePublishingStrategy) DeepCopy() *RoutePublishingStrategy { + if in == nil { + return nil + } + out := new(RoutePublishingStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouterFilter) DeepCopyInto(out *RouterFilter) { + *out = *in + in.FilterByNeutronTags.DeepCopyInto(&out.FilterByNeutronTags) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouterFilter. +func (in *RouterFilter) DeepCopy() *RouterFilter { + if in == nil { + return nil + } + out := new(RouterFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouterParam) DeepCopyInto(out *RouterParam) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(RouterFilter) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouterParam. +func (in *RouterParam) DeepCopy() *RouterParam { + if in == nil { + return nil + } + out := new(RouterParam) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretEncryptionSpec) DeepCopyInto(out *SecretEncryptionSpec) { + *out = *in + if in.KMS != nil { + in, out := &in.KMS, &out.KMS + *out = new(KMSSpec) + (*in).DeepCopyInto(*out) + } + if in.AESCBC != nil { + in, out := &in.AESCBC, &out.AESCBC + *out = new(AESCBCSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretEncryptionSpec. +func (in *SecretEncryptionSpec) DeepCopy() *SecretEncryptionSpec { + if in == nil { + return nil + } + out := new(SecretEncryptionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceNetworkEntry) DeepCopyInto(out *ServiceNetworkEntry) { + *out = *in + in.CIDR.DeepCopyInto(&out.CIDR) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceNetworkEntry. 
+func (in *ServiceNetworkEntry) DeepCopy() *ServiceNetworkEntry { + if in == nil { + return nil + } + out := new(ServiceNetworkEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServicePublishingStrategy) DeepCopyInto(out *ServicePublishingStrategy) { + *out = *in + if in.NodePort != nil { + in, out := &in.NodePort, &out.NodePort + *out = new(NodePortPublishingStrategy) + **out = **in + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(LoadBalancerPublishingStrategy) + **out = **in + } + if in.Route != nil { + in, out := &in.Route, &out.Route + *out = new(RoutePublishingStrategy) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePublishingStrategy. +func (in *ServicePublishingStrategy) DeepCopy() *ServicePublishingStrategy { + if in == nil { + return nil + } + out := new(ServicePublishingStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServicePublishingStrategyMapping) DeepCopyInto(out *ServicePublishingStrategyMapping) { + *out = *in + in.ServicePublishingStrategy.DeepCopyInto(&out.ServicePublishingStrategy) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePublishingStrategyMapping. +func (in *ServicePublishingStrategyMapping) DeepCopy() *ServicePublishingStrategyMapping { + if in == nil { + return nil + } + out := new(ServicePublishingStrategyMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubnetFilter) DeepCopyInto(out *SubnetFilter) { + *out = *in + in.FilterByNeutronTags.DeepCopyInto(&out.FilterByNeutronTags) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetFilter. +func (in *SubnetFilter) DeepCopy() *SubnetFilter { + if in == nil { + return nil + } + out := new(SubnetFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubnetParam) DeepCopyInto(out *SubnetParam) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(SubnetFilter) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetParam. +func (in *SubnetParam) DeepCopy() *SubnetParam { + if in == nil { + return nil + } + out := new(SubnetParam) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubnetSpec) DeepCopyInto(out *SubnetSpec) { + *out = *in + if in.DNSNameservers != nil { + in, out := &in.DNSNameservers, &out.DNSNameservers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AllocationPools != nil { + in, out := &in.AllocationPools, &out.AllocationPools + *out = make([]AllocationPool, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetSpec. 
+func (in *SubnetSpec) DeepCopy() *SubnetSpec { + if in == nil { + return nil + } + out := new(SubnetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Taint) DeepCopyInto(out *Taint) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Taint. +func (in *Taint) DeepCopy() *Taint { + if in == nil { + return nil + } + out := new(Taint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UnmanagedEtcdSpec) DeepCopyInto(out *UnmanagedEtcdSpec) { + *out = *in + out.TLS = in.TLS +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnmanagedEtcdSpec. +func (in *UnmanagedEtcdSpec) DeepCopy() *UnmanagedEtcdSpec { + if in == nil { + return nil + } + out := new(UnmanagedEtcdSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserManagedDiagnostics) DeepCopyInto(out *UserManagedDiagnostics) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserManagedDiagnostics. +func (in *UserManagedDiagnostics) DeepCopy() *UserManagedDiagnostics { + if in == nil { + return nil + } + out := new(UserManagedDiagnostics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Volume) DeepCopyInto(out *Volume) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. 
+func (in *Volume) DeepCopy() *Volume { + if in == nil { + return nil + } + out := new(Volume) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/zz_generated.featuregated-crd-manifests.yaml new file mode 100644 index 000000000..3eaaf2c4c --- /dev/null +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/zz_generated.featuregated-crd-manifests.yaml @@ -0,0 +1,230 @@ +awsendpointservices.hypershift.openshift.io: + Annotations: {} + ApprovedPRNumber: "" + CRDName: awsendpointservices.hypershift.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: "" + FilenameOperatorOrdering: "" + FilenameRunLevel: "" + GroupName: hypershift.openshift.io + HasStatus: true + KindName: AWSEndpointService + Labels: {} + PluralName: awsendpointservices + PrinterColumns: [] + Scope: Namespaced + ShortNames: null + TopLevelFeatureGates: [] + Version: v1beta1 + +certificatesigningrequestapprovals.hypershift.openshift.io: + Annotations: {} + ApprovedPRNumber: "" + CRDName: certificatesigningrequestapprovals.hypershift.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: "" + FilenameOperatorOrdering: "" + FilenameRunLevel: "" + GroupName: hypershift.openshift.io + HasStatus: false + KindName: CertificateSigningRequestApproval + Labels: {} + PluralName: certificatesigningrequestapprovals + PrinterColumns: [] + Scope: Namespaced + ShortNames: + - csra + - csras + TopLevelFeatureGates: [] + Version: v1beta1 + +controlplanecomponents.hypershift.openshift.io: + Annotations: {} + ApprovedPRNumber: "" + CRDName: controlplanecomponents.hypershift.openshift.io + Capability: "" + Category: "" + FeatureGates: + - ControlPlaneV2 + FilenameOperatorName: "" + FilenameOperatorOrdering: "" + FilenameRunLevel: "" + GroupName: hypershift.openshift.io + HasStatus: true + KindName: ControlPlaneComponent + Labels: {} + PluralName: controlplanecomponents + PrinterColumns: + - description: Version + jsonPath: .status.version + name: Version + type: string + - description: Available + jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - description: Progressing + jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - description: Message + jsonPath: .status.conditions[?(@.type=="Available")].message + name: Message + type: string + - description: ProgressingMessage + jsonPath: .status.conditions[?(@.type=="Progressing")].message + name: ProgressingMessage + priority: 1 + type: string + Scope: Namespaced + ShortNames: + - cpc + - cpcs + TopLevelFeatureGates: + - ControlPlaneV2 + Version: v1beta1 + +hostedclusters.hypershift.openshift.io: + Annotations: {} + ApprovedPRNumber: "" + CRDName: hostedclusters.hypershift.openshift.io + Capability: "" + Category: "" + FeatureGates: + - AROHCPManagedIdentities + - DynamicResourceAllocation + - ExternalOIDC + - NetworkDiagnosticsConfig + - OpenStack + FilenameOperatorName: "" + FilenameOperatorOrdering: "" + FilenameRunLevel: "" + GroupName: hypershift.openshift.io + HasStatus: true + KindName: HostedCluster + Labels: {} + PluralName: hostedclusters + PrinterColumns: + - description: Version + jsonPath: .status.version.history[?(@.state=="Completed")].version + name: Version + type: string + - description: KubeConfig Secret + jsonPath: .status.kubeconfig.name 
+ name: KubeConfig + type: string + - description: Progress + jsonPath: .status.version.history[?(@.state!="")].state + name: Progress + type: string + - description: Available + jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - description: Progressing + jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - description: Message + jsonPath: .status.conditions[?(@.type=="Available")].message + name: Message + type: string + Scope: Namespaced + ShortNames: + - hc + - hcs + TopLevelFeatureGates: [] + Version: v1beta1 + +hostedcontrolplanes.hypershift.openshift.io: + Annotations: {} + ApprovedPRNumber: "" + CRDName: hostedcontrolplanes.hypershift.openshift.io + Capability: "" + Category: cluster-api + FeatureGates: + - AROHCPManagedIdentities + - DynamicResourceAllocation + - ExternalOIDC + - NetworkDiagnosticsConfig + - OpenStack + FilenameOperatorName: "" + FilenameOperatorOrdering: "" + FilenameRunLevel: "" + GroupName: hypershift.openshift.io + HasStatus: true + KindName: HostedControlPlane + Labels: {} + PluralName: hostedcontrolplanes + PrinterColumns: [] + Scope: Namespaced + ShortNames: + - hcp + - hcps + TopLevelFeatureGates: [] + Version: v1beta1 + +nodepools.hypershift.openshift.io: + Annotations: {} + ApprovedPRNumber: "" + CRDName: nodepools.hypershift.openshift.io + Capability: "" + Category: "" + FeatureGates: + - OpenStack + FilenameOperatorName: "" + FilenameOperatorOrdering: "" + FilenameRunLevel: "" + GroupName: hypershift.openshift.io + HasStatus: true + KindName: NodePool + Labels: {} + PluralName: nodepools + PrinterColumns: + - description: Cluster + jsonPath: .spec.clusterName + name: Cluster + type: string + - description: Desired Nodes + jsonPath: .spec.replicas + name: Desired Nodes + type: integer + - description: Available Nodes + jsonPath: .status.replicas + name: Current Nodes + type: integer + - description: Autoscaling Enabled + jsonPath: .status.conditions[?(@.type=="AutoscalingEnabled")].status + name: Autoscaling + type: string + - description: Node Autorepair Enabled + jsonPath: .status.conditions[?(@.type=="AutorepairEnabled")].status + name: Autorepair + type: string + - description: Current version + jsonPath: .status.version + name: Version + type: string + - description: UpdatingVersion in progress + jsonPath: .status.conditions[?(@.type=="UpdatingVersion")].status + name: UpdatingVersion + type: string + - description: UpdatingConfig in progress + jsonPath: .status.conditions[?(@.type=="UpdatingConfig")].status + name: UpdatingConfig + type: string + - description: Message + jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Message + type: string + Scope: Namespaced + ShortNames: + - np + - nps + TopLevelFeatureGates: [] + Version: v1beta1 + diff --git a/vendor/github.com/openshift/hypershift/api/ibmcapi/types.go b/vendor/github.com/openshift/hypershift/api/ibmcapi/types.go new file mode 100644 index 000000000..25c8e5b7e --- /dev/null +++ b/vendor/github.com/openshift/hypershift/api/ibmcapi/types.go @@ -0,0 +1,15 @@ +// Types copied from https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/blob/ba09aa01b5f23d13917fc455b9e6aaa885809e23/api/v1beta2/ibmpowervsmachine_types.go + +package ibmcapi + +// PowerVSProcessorType enum attribute to identify the PowerVS instance processor type. +type PowerVSProcessorType string + +const ( + // PowerVSProcessorTypeDedicated enum property to identify a Dedicated Power VS processor type. 
+ PowerVSProcessorTypeDedicated PowerVSProcessorType = "Dedicated" + // PowerVSProcessorTypeShared enum property to identify a Shared Power VS processor type. + PowerVSProcessorTypeShared PowerVSProcessorType = "Shared" + // PowerVSProcessorTypeCapped enum property to identify a Capped Power VS processor type. + PowerVSProcessorTypeCapped PowerVSProcessorType = "Capped" +) diff --git a/vendor/github.com/openshift/hypershift/api/util/ipnet/ipnet.go b/vendor/github.com/openshift/hypershift/api/util/ipnet/ipnet.go new file mode 100644 index 000000000..63fc6b7f4 --- /dev/null +++ b/vendor/github.com/openshift/hypershift/api/util/ipnet/ipnet.go @@ -0,0 +1,129 @@ +// Package ipnet wraps net.IPNet to get CIDR serialization. +// derived from: https://github.com/openshift/installer/blob/e6ac416efbf6d8dcc5a36e1187a4e05bbe7c9319/pkg/ipnet/ipnet.go +package ipnet + +import ( + "encoding/json" + "fmt" + "net" + "strings" +) + +var nullString = "null" +var nilString = "" +var nullBytes = []byte(nullString) + +// IPNet wraps net.IPNet to get CIDR serialization. +// +// +kubebuilder:validation:Type=string +// +kubebuilder:validation:MaxLength=43 +// +kubebuilder:validation:XValidation:rule=`self.matches('^((\\d{1,3}\\.){3}\\d{1,3}/\\d{1,2})$') || self.matches('^([0-9a-fA-F]{0,4}:){2,7}([0-9a-fA-F]{0,4})?/[0-9]{1,3}$')`,message="cidr must be a valid IPv4 or IPv6 CIDR notation (e.g., 192.168.1.0/24 or 2001:db8::/64)" +type IPNet net.IPNet + +type IPNets []IPNet + +func (ipnets IPNets) StringSlice() []string { + out := make([]string, 0, len(ipnets)) + for _, n := range ipnets { + out = append(out, n.String()) + } + return out +} + +func (ipnets IPNets) CSVString() string { + return strings.Join(ipnets.StringSlice(), ",") +} + +// String returns a CIDR serialization of the subnet, or an empty +// string if the subnet is nil. +func (ipnet *IPNet) String() string { + if ipnet == nil { + return "" + } + return (*net.IPNet)(ipnet).String() +} + +// MarshalJSON interface for an IPNet +func (ipnet *IPNet) MarshalJSON() (data []byte, err error) { + if ipnet == nil || len(ipnet.IP) == 0 { + return nullBytes, nil + } + + return json.Marshal(ipnet.String()) +} + +// UnmarshalJSON interface for an IPNet +func (ipnet *IPNet) UnmarshalJSON(b []byte) (err error) { + if string(b) == nullString { + ipnet.IP = net.IP{} + ipnet.Mask = net.IPMask{} + return nil + } + + var cidr string + err = json.Unmarshal(b, &cidr) + if err != nil { + return fmt.Errorf("could not unmarshal string: %w", err) + } + + if cidr == nilString { + ipnet.IP = net.IP{} + ipnet.Mask = net.IPMask{} + return nil + } + + parsedIPNet, err := ParseCIDR(cidr) + if err != nil { + return fmt.Errorf("could not parse cidr %s: %w", cidr, err) + } + + *ipnet = *parsedIPNet + + return nil +} + +func (in *IPNet) DeepCopy() *IPNet { + out := IPNet{ + IP: append([]byte{}, in.IP...), + Mask: append([]byte{}, in.Mask...), + } + return &out +} + +func (in *IPNet) DeepCopyInto(out *IPNet) { + clone := in.DeepCopy() + *out = *clone +} + +// ParseCIDR parses a CIDR from its string representation. +func ParseCIDR(s string) (*IPNet, error) { + ip, cidr, err := net.ParseCIDR(s) + if err != nil { + return nil, err + } + + // This check is needed in order to work around a strange quirk in the Go + // standard library. All of the addresses returned by net.ParseCIDR() are + // 16-byte addresses. This does _not_ imply that they are IPv6 addresses, + // which is what some libraries (e.g. github.com/apparentlymart/go-cidr) + // assume. 
By forcing the address to be the expected length, we can work
+	// around these bugs.
+	if ip.To4() != nil {
+		ip = ip.To4()
+	}
+
+	return &IPNet{
+		IP:   ip,
+		Mask: cidr.Mask,
+	}, nil
+}
+
+// MustParseCIDR parses a CIDR from its string representation. If the parse fails,
+// the function will panic.
+func MustParseCIDR(s string) *IPNet {
+	cidr, err := ParseCIDR(s)
+	if err != nil {
+		panic(err)
+	}
+	return cidr
+}
diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE
index 6a66aea5e..2a7cf70da 100644
--- a/vendor/golang.org/x/net/LICENSE
+++ b/vendor/golang.org/x/net/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
 copyright notice, this list of conditions and the following disclaimer
 in the documentation and/or other materials provided with the
 distribution.
-   * Neither the name of Google Inc. nor the names of its
+   * Neither the name of Google LLC nor the names of its
 contributors may be used to endorse or promote products derived from
 this software without specific prior written permission.
diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go
new file mode 100644
index 000000000..de58dfb8d
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config.go
@@ -0,0 +1,122 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+	"math"
+	"net/http"
+	"time"
+)
+
+// http2Config is a package-internal version of net/http.HTTP2Config.
+//
+// http.HTTP2Config was added in Go 1.24.
+// When running with a version of net/http that includes HTTP2Config,
+// we merge the configuration with the fields in Transport or Server
+// to produce an http2Config.
+//
+// Zero valued fields in http2Config are interpreted as in the
+// net/http.HTTP2Config documentation.
+//
+// Precedence order for reconciling configurations is:
+//
+//   - Use the net/http.{Server,Transport}.HTTP2Config value, when non-zero.
+//   - Otherwise use the http2.{Server,Transport} value.
+//   - If the resulting value is zero or out of range, use a default.
+type http2Config struct {
+	MaxConcurrentStreams         uint32
+	MaxDecoderHeaderTableSize    uint32
+	MaxEncoderHeaderTableSize    uint32
+	MaxReadFrameSize             uint32
+	MaxUploadBufferPerConnection int32
+	MaxUploadBufferPerStream     int32
+	SendPingTimeout              time.Duration
+	PingTimeout                  time.Duration
+	WriteByteTimeout             time.Duration
+	PermitProhibitedCipherSuites bool
+	CountError                   func(errType string)
+}
+
+// configFromServer merges configuration settings from
+// net/http.Server.HTTP2Config and http2.Server.
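+// For example, on Go 1.24+ a non-zero net/http.Server.HTTP2.MaxConcurrentStreams
+// takes precedence over http2.Server.MaxConcurrentStreams, and when both are
+// unset setConfigDefaults falls back to defaultMaxStreams.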
+func configFromServer(h1 *http.Server, h2 *Server) http2Config {
+	conf := http2Config{
+		MaxConcurrentStreams:         h2.MaxConcurrentStreams,
+		MaxEncoderHeaderTableSize:    h2.MaxEncoderHeaderTableSize,
+		MaxDecoderHeaderTableSize:    h2.MaxDecoderHeaderTableSize,
+		MaxReadFrameSize:             h2.MaxReadFrameSize,
+		MaxUploadBufferPerConnection: h2.MaxUploadBufferPerConnection,
+		MaxUploadBufferPerStream:     h2.MaxUploadBufferPerStream,
+		SendPingTimeout:              h2.ReadIdleTimeout,
+		PingTimeout:                  h2.PingTimeout,
+		WriteByteTimeout:             h2.WriteByteTimeout,
+		PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites,
+		CountError:                   h2.CountError,
+	}
+	fillNetHTTPServerConfig(&conf, h1)
+	setConfigDefaults(&conf, true)
+	return conf
+}
+
+// configFromTransport merges configuration settings from h2 and h2.t1.HTTP2
+// (the net/http Transport).
+func configFromTransport(h2 *Transport) http2Config {
+	conf := http2Config{
+		MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
+		MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
+		MaxReadFrameSize:          h2.MaxReadFrameSize,
+		SendPingTimeout:           h2.ReadIdleTimeout,
+		PingTimeout:               h2.PingTimeout,
+		WriteByteTimeout:          h2.WriteByteTimeout,
+	}
+
+	// Unlike most config fields, where out-of-range values revert to the default,
+	// Transport.MaxReadFrameSize clips.
+	if conf.MaxReadFrameSize < minMaxFrameSize {
+		conf.MaxReadFrameSize = minMaxFrameSize
+	} else if conf.MaxReadFrameSize > maxFrameSize {
+		conf.MaxReadFrameSize = maxFrameSize
+	}
+
+	if h2.t1 != nil {
+		fillNetHTTPTransportConfig(&conf, h2.t1)
+	}
+	setConfigDefaults(&conf, false)
+	return conf
+}
+
+func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) {
+	if *v < minval || *v > maxval {
+		*v = defval
+	}
+}
+
+func setConfigDefaults(conf *http2Config, server bool) {
+	setDefault(&conf.MaxConcurrentStreams, 1, math.MaxUint32, defaultMaxStreams)
+	setDefault(&conf.MaxEncoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize)
+	setDefault(&conf.MaxDecoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize)
+	if server {
+		setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, 1<<20)
+	} else {
+		setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, transportDefaultConnFlow)
+	}
+	if server {
+		setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, 1<<20)
+	} else {
+		setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, transportDefaultStreamFlow)
+	}
+	setDefault(&conf.MaxReadFrameSize, minMaxFrameSize, maxFrameSize, defaultMaxReadFrameSize)
+	setDefault(&conf.PingTimeout, 1, math.MaxInt64, 15*time.Second)
+}
+
+// adjustHTTP1MaxHeaderSize converts a limit in bytes on the size of an HTTP/1 header
+// to an HTTP/2 MAX_HEADER_LIST_SIZE value.
+func adjustHTTP1MaxHeaderSize(n int64) int64 {
+	// http2's count is in a slightly different unit and includes 32 bytes per pair.
+	// So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
+	const perFieldOverhead = 32 // per http2 spec
+	const typicalHeaders = 10   // conservative
+	return n + typicalHeaders*perFieldOverhead
+}
diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go
new file mode 100644
index 000000000..e3784123c
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config_go124.go
@@ -0,0 +1,58 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
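+
+// This file only builds on Go 1.24 and newer, where the net/http.Server.HTTP2
+// and net/http.Transport.HTTP2 configuration fields exist.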
+
+//go:build go1.24
+
+package http2
+
+import "net/http"
+
+// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2.
+func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {
+	fillNetHTTPConfig(conf, srv.HTTP2)
+}
+
+// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2.
+func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {
+	fillNetHTTPConfig(conf, tr.HTTP2)
+}
+
+func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
+	if h2 == nil {
+		return
+	}
+	if h2.MaxConcurrentStreams != 0 {
+		conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
+	}
+	if h2.MaxEncoderHeaderTableSize != 0 {
+		conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
+	}
+	if h2.MaxDecoderHeaderTableSize != 0 {
+		conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
+	}
+	if h2.MaxReadFrameSize != 0 {
+		conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
+	}
+	if h2.MaxReceiveBufferPerConnection != 0 {
+		conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
+	}
+	if h2.MaxReceiveBufferPerStream != 0 {
+		conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
+	}
+	if h2.SendPingTimeout != 0 {
+		conf.SendPingTimeout = h2.SendPingTimeout
+	}
+	if h2.PingTimeout != 0 {
+		conf.PingTimeout = h2.PingTimeout
+	}
+	if h2.WriteByteTimeout != 0 {
+		conf.WriteByteTimeout = h2.WriteByteTimeout
+	}
+	if h2.PermitProhibitedCipherSuites {
+		conf.PermitProhibitedCipherSuites = true
+	}
+	if h2.CountError != nil {
+		conf.CountError = h2.CountError
+	}
+}
diff --git a/vendor/golang.org/x/net/http2/config_pre_go124.go b/vendor/golang.org/x/net/http2/config_pre_go124.go
new file mode 100644
index 000000000..060fd6c64
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config_pre_go124.go
@@ -0,0 +1,16 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.24
+
+package http2
+
+import "net/http"
+
+// Pre-Go 1.24 fallback.
+// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24.
+
+func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {}
+
+func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {}
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index 003e649f3..7688c356b 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -19,8 +19,9 @@ import (
 	"bufio"
 	"context"
 	"crypto/tls"
+	"errors"
 	"fmt"
-	"io"
+	"net"
 	"net/http"
 	"os"
 	"sort"
@@ -237,13 +238,19 @@ func (cw closeWaiter) Wait() {
 // Its buffered writer is lazily allocated as needed, to minimize
 // idle memory usage with many connections.
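+// It also enforces the configured WriteByteTimeout: each underlying write goes
+// through writeWithByteTimeout, which arms the connection's write deadline and
+// extends it for as long as the write keeps making progress.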
type bufferedWriter struct {
-	_  incomparable
-	w  io.Writer     // immutable
-	bw *bufio.Writer // non-nil when data is buffered
+	_           incomparable
+	group       synctestGroupInterface // immutable
+	conn        net.Conn               // immutable
+	bw          *bufio.Writer          // non-nil when data is buffered
+	byteTimeout time.Duration          // immutable, WriteByteTimeout
 }
 
-func newBufferedWriter(w io.Writer) *bufferedWriter {
-	return &bufferedWriter{w: w}
+func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter {
+	return &bufferedWriter{
+		group:       group,
+		conn:        conn,
+		byteTimeout: timeout,
+	}
 }
 
 // bufWriterPoolBufferSize is the size of bufio.Writer's
@@ -270,7 +277,7 @@ func (w *bufferedWriter) Available() int {
 func (w *bufferedWriter) Write(p []byte) (n int, err error) {
 	if w.bw == nil {
 		bw := bufWriterPool.Get().(*bufio.Writer)
-		bw.Reset(w.w)
+		bw.Reset((*bufferedWriterTimeoutWriter)(w))
 		w.bw = bw
 	}
 	return w.bw.Write(p)
@@ -288,6 +295,38 @@ func (w *bufferedWriter) Flush() error {
 	return err
 }
 
+type bufferedWriterTimeoutWriter bufferedWriter
+
+func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) {
+	return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p)
+}
+
+// writeWithByteTimeout writes to conn.
+// If more than timeout passes without any bytes being written to the connection,
+// the write fails.
+func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
+	if timeout <= 0 {
+		return conn.Write(p)
+	}
+	for {
+		var now time.Time
+		if group == nil {
+			now = time.Now()
+		} else {
+			now = group.Now()
+		}
+		conn.SetWriteDeadline(now.Add(timeout))
+		nn, err := conn.Write(p[n:])
+		n += nn
+		if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) {
+			// Either we finished the write, made no progress, or hit an error
+			// other than the write deadline. Whichever it is, we're done now.
+			conn.SetWriteDeadline(time.Time{})
+			return n, err
+		}
+	}
+}
+
 func mustUint31(v int32) uint32 {
 	if v < 0 || v > 2147483647 {
 		panic("out of range")
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 6c349f3ec..617b4a476 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -29,6 +29,7 @@ import (
 	"bufio"
 	"bytes"
 	"context"
+	"crypto/rand"
 	"crypto/tls"
 	"errors"
 	"fmt"
@@ -52,10 +53,14 @@ import (
 )
 
 const (
-	prefaceTimeout        = 10 * time.Second
-	firstSettingsTimeout  = 2 * time.Second // should be in-flight with preface anyway
-	handlerChunkWriteSize = 4 << 10
-	defaultMaxStreams     = 250 // TODO: make this 100 as the GFE seems to?
+	prefaceTimeout        = 10 * time.Second
+	firstSettingsTimeout  = 2 * time.Second // should be in-flight with preface anyway
+	handlerChunkWriteSize = 4 << 10
+	defaultMaxStreams     = 250 // TODO: make this 100 as the GFE seems to?
+
+	// maxQueuedControlFrames is the maximum number of control frames like
+	// SETTINGS, PING and RST_STREAM that will be queued for writing before
+	// the connection is closed to prevent memory exhaustion attacks.
 	maxQueuedControlFrames = 10000
 )
 
@@ -127,6 +132,22 @@ type Server struct {
 	// If zero or negative, there is no timeout.
 	IdleTimeout time.Duration
 
+	// ReadIdleTimeout is the timeout after which a health check using a ping
+	// frame will be carried out if no frame is received on the connection.
+	// If zero, no health check is performed.
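+	// (These semantics match the existing Transport.ReadIdleTimeout field.)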
+ ReadIdleTimeout time.Duration + + // PingTimeout is the timeout after which the connection will be closed + // if a response to a ping is not received. + // If zero, a default of 15 seconds is used. + PingTimeout time.Duration + + // WriteByteTimeout is the timeout after which a connection will be + // closed if no data can be written to it. The timeout begins when data is + // available to write, and is extended whenever any bytes are written. + // If zero or negative, there is no timeout. + WriteByteTimeout time.Duration + // MaxUploadBufferPerConnection is the size of the initial flow // control window for each connections. The HTTP/2 spec does not // allow this to be smaller than 65535 or larger than 2^32-1. @@ -189,57 +210,6 @@ func (s *Server) afterFunc(d time.Duration, f func()) timer { return timeTimer{time.AfterFunc(d, f)} } -func (s *Server) initialConnRecvWindowSize() int32 { - if s.MaxUploadBufferPerConnection >= initialWindowSize { - return s.MaxUploadBufferPerConnection - } - return 1 << 20 -} - -func (s *Server) initialStreamRecvWindowSize() int32 { - if s.MaxUploadBufferPerStream > 0 { - return s.MaxUploadBufferPerStream - } - return 1 << 20 -} - -func (s *Server) maxReadFrameSize() uint32 { - if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { - return v - } - return defaultMaxReadFrameSize -} - -func (s *Server) maxConcurrentStreams() uint32 { - if v := s.MaxConcurrentStreams; v > 0 { - return v - } - return defaultMaxStreams -} - -func (s *Server) maxDecoderHeaderTableSize() uint32 { - if v := s.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (s *Server) maxEncoderHeaderTableSize() uint32 { - if v := s.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -// maxQueuedControlFrames is the maximum number of control frames like -// SETTINGS, PING and RST_STREAM that will be queued for writing before -// the connection is closed to prevent memory exhaustion attacks. -func (s *Server) maxQueuedControlFrames() int { - // TODO: if anybody asks, add a Server field, and remember to define the - // behavior of negative values. 
- return maxQueuedControlFrames -} - type serverInternalState struct { mu sync.Mutex activeConns map[*serverConn]struct{} @@ -440,13 +410,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon baseCtx, cancel := serverConnBaseContext(c, opts) defer cancel() + http1srv := opts.baseConfig() + conf := configFromServer(http1srv, s) sc := &serverConn{ srv: s, - hs: opts.baseConfig(), + hs: http1srv, conn: c, baseCtx: baseCtx, remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), + bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout), handler: opts.handler(), streams: make(map[uint32]*stream), readFrameCh: make(chan readFrameResult), @@ -456,9 +428,12 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way doneServing: make(chan struct{}), clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" - advMaxStreams: s.maxConcurrentStreams(), + advMaxStreams: conf.MaxConcurrentStreams, initialStreamSendWindowSize: initialWindowSize, + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, maxFrameSize: initialMaxFrameSize, + pingTimeout: conf.PingTimeout, + countErrorFunc: conf.CountError, serveG: newGoroutineLock(), pushEnabled: true, sawClientPreface: opts.SawClientPreface, @@ -491,15 +466,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon sc.flow.add(initialWindowSize) sc.inflow.init(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize()) + sc.hpackEncoder.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) fr := NewFramer(sc.bw, c) - if s.CountError != nil { - fr.countError = s.CountError + if conf.CountError != nil { + fr.countError = conf.CountError } - fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil) + fr.ReadMetaHeaders = hpack.NewDecoder(conf.MaxDecoderHeaderTableSize, nil) fr.MaxHeaderListSize = sc.maxHeaderListSize() - fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) sc.framer = fr if tc, ok := c.(connectionStater); ok { @@ -532,7 +507,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon // So for now, do nothing here again. } - if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { // "Endpoints MAY choose to generate a connection error // (Section 5.4.1) of type INADEQUATE_SECURITY if one of // the prohibited cipher suites are negotiated." 
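The three Server timeout fields added above (ReadIdleTimeout, PingTimeout, WriteByteTimeout) give HTTP/2 servers the same liveness knobs the Transport already had. A sketch of wiring them up through the exported API; the durations and cert paths are illustrative only.

package main

import (
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	h1 := &http.Server{Addr: ":8443"}
	h2 := &http2.Server{
		ReadIdleTimeout:  30 * time.Second, // ping after 30s without any inbound frame
		PingTimeout:      15 * time.Second, // close if the ping ack never arrives
		WriteByteTimeout: 10 * time.Second, // close if writes stop making progress
	}
	if err := http2.ConfigureServer(h1, h2); err != nil {
		panic(err)
	}
	_ = h1.ListenAndServeTLS("cert.pem", "key.pem") // placeholder cert paths
}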
@@ -569,7 +544,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon opts.UpgradeRequest = nil } - sc.serve() + sc.serve(conf) } func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { @@ -609,6 +584,7 @@ type serverConn struct { tlsState *tls.ConnectionState // shared by all handlers, like net/http remoteAddrStr string writeSched WriteScheduler + countErrorFunc func(errType string) // Everything following is owned by the serve loop; use serveG.check(): serveG goroutineLock // used to verify funcs are on serve() @@ -628,6 +604,7 @@ type serverConn struct { streams map[uint32]*stream unstartedHandlers []unstartedHandler initialStreamSendWindowSize int32 + initialStreamRecvWindowSize int32 maxFrameSize int32 peerMaxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case @@ -638,9 +615,14 @@ type serverConn struct { inGoAway bool // we've started to or sent GOAWAY inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop needToSendGoAway bool // we need to schedule a GOAWAY frame write + pingSent bool + sentPingData [8]byte goAwayCode ErrCode shutdownTimer timer // nil until used idleTimer timer // nil if unused + readIdleTimeout time.Duration + pingTimeout time.Duration + readIdleTimer timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -655,11 +637,7 @@ func (sc *serverConn) maxHeaderListSize() uint32 { if n <= 0 { n = http.DefaultMaxHeaderBytes } - // http2's count is in a slightly different unit and includes 32 bytes per pair. - // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. - const perFieldOverhead = 32 // per http2 spec - const typicalHeaders = 10 // conservative - return uint32(n + typicalHeaders*perFieldOverhead) + return uint32(adjustHTTP1MaxHeaderSize(int64(n))) } func (sc *serverConn) curOpenStreams() uint32 { @@ -923,7 +901,7 @@ func (sc *serverConn) notePanic() { } } -func (sc *serverConn) serve() { +func (sc *serverConn) serve(conf http2Config) { sc.serveG.check() defer sc.notePanic() defer sc.conn.Close() @@ -937,18 +915,18 @@ func (sc *serverConn) serve() { sc.writeFrame(FrameWriteRequest{ write: writeSettings{ - {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, + {SettingMaxFrameSize, conf.MaxReadFrameSize}, {SettingMaxConcurrentStreams, sc.advMaxStreams}, {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()}, - {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, + {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, + {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, }, }) sc.unackedSettings++ // Each connection starts with initialWindowSize inflow tokens. // If a higher value is configured, we add more tokens. 
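The inline padding that used to live in maxHeaderListSize moves into an adjustHTTP1MaxHeaderSize helper defined in the new config.go, which is not part of this hunk. Presumably it keeps the arithmetic the deleted comment described; a sketch under that assumption:

package sketch

// Assumed shape of the helper: pad the net/http byte limit by the
// 32-bytes-per-field overhead HTTP/2 counts (RFC 7540 section 6.5.2),
// for a conservative guess of 10 header fields.
func adjustHTTP1MaxHeaderSize(n int64) int64 {
	const perFieldOverhead = 32 // per http2 spec
	const typicalHeaders = 10   // conservative
	return n + typicalHeaders*perFieldOverhead
}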
- if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + if diff := conf.MaxUploadBufferPerConnection - initialWindowSize; diff > 0 { sc.sendWindowUpdate(nil, int(diff)) } @@ -968,11 +946,18 @@ func (sc *serverConn) serve() { defer sc.idleTimer.Stop() } + if conf.SendPingTimeout > 0 { + sc.readIdleTimeout = conf.SendPingTimeout + sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) + defer sc.readIdleTimer.Stop() + } + go sc.readFrames() // closed by defer sc.conn.Close above settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() + lastFrameTime := sc.srv.now() loopNum := 0 for { loopNum++ @@ -986,6 +971,7 @@ func (sc *serverConn) serve() { case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: + lastFrameTime = sc.srv.now() // Process any written frames before reading new frames from the client since a // written frame could have triggered a new stream to be started. if sc.writingFrameAsync { @@ -1017,6 +1003,8 @@ func (sc *serverConn) serve() { case idleTimerMsg: sc.vlogf("connection is idle") sc.goAway(ErrCodeNo) + case readIdleTimerMsg: + sc.handlePingTimer(lastFrameTime) case shutdownTimerMsg: sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) return @@ -1039,7 +1027,7 @@ func (sc *serverConn) serve() { // If the peer is causing us to generate a lot of control frames, // but not reading them from us, assume they are trying to make us // run out of memory. - if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() { + if sc.queuedControlFrames > maxQueuedControlFrames { sc.vlogf("http2: too many control frames in send queue, closing connection") return } @@ -1055,12 +1043,39 @@ func (sc *serverConn) serve() { } } +func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { + if sc.pingSent { + sc.vlogf("timeout waiting for PING response") + sc.conn.Close() + return + } + + pingAt := lastFrameReadTime.Add(sc.readIdleTimeout) + now := sc.srv.now() + if pingAt.After(now) { + // We received frames since arming the ping timer. + // Reset it for the next possible timeout. + sc.readIdleTimer.Reset(pingAt.Sub(now)) + return + } + + sc.pingSent = true + // Ignore crypto/rand.Read errors: It generally can't fail, and worse case if it does + // is we send a PING frame containing 0s. + _, _ = rand.Read(sc.sentPingData[:]) + sc.writeFrame(FrameWriteRequest{ + write: &writePing{data: sc.sentPingData}, + }) + sc.readIdleTimer.Reset(sc.pingTimeout) +} + type serverMessage int // Message values sent to serveMsgCh. 
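Note the design choice above: the serve loop stamps lastFrameTime on every inbound frame instead of resetting the timer on the hot path; handlePingTimer fires, compares, and re-arms for the remainder. The same lazy-timer pattern in isolation (the real code reuses one timer via Reset and is confined to the serve goroutine; this sketch uses an atomic and a fresh timer per re-arm to stay race-free):

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

const readIdle = 500 * time.Millisecond

var lastFrame atomic.Int64

func onReadIdle() {
	pingAt := time.Unix(0, lastFrame.Load()).Add(readIdle)
	if now := time.Now(); pingAt.After(now) {
		// Frames arrived since the timer was armed: sleep out the remainder.
		time.AfterFunc(pingAt.Sub(now), onReadIdle)
		return
	}
	fmt.Println("idle: this is where the PING would be written")
}

func main() {
	lastFrame.Store(time.Now().UnixNano())
	time.AfterFunc(readIdle, onReadIdle)

	time.Sleep(200 * time.Millisecond)
	lastFrame.Store(time.Now().UnixNano()) // stand-in for an inbound frame
	time.Sleep(time.Second)
}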
var ( settingsTimerMsg = new(serverMessage) idleTimerMsg = new(serverMessage) + readIdleTimerMsg = new(serverMessage) shutdownTimerMsg = new(serverMessage) gracefulShutdownMsg = new(serverMessage) handlerDoneMsg = new(serverMessage) @@ -1068,6 +1083,7 @@ var ( func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) } func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } func (sc *serverConn) sendServeMsg(msg interface{}) { @@ -1320,6 +1336,10 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { sc.writingFrame = false sc.writingFrameAsync = false + if res.err != nil { + sc.conn.Close() + } + wr := res.wr if writeEndsStream(wr.write) { @@ -1594,6 +1614,11 @@ func (sc *serverConn) processFrame(f Frame) error { func (sc *serverConn) processPing(f *PingFrame) error { sc.serveG.check() if f.IsAck() { + if sc.pingSent && sc.sentPingData == f.Data { + // This is a response to a PING we sent. + sc.pingSent = false + sc.readIdleTimer.Reset(sc.readIdleTimeout) + } // 6.7 PING: " An endpoint MUST NOT respond to PING frames // containing this flag." return nil @@ -2160,7 +2185,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.cw.Init() st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) - st.inflow.init(sc.srv.initialStreamRecvWindowSize()) + st.inflow.init(sc.initialStreamRecvWindowSize) if sc.hs.WriteTimeout > 0 { st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } @@ -3301,7 +3326,7 @@ func (sc *serverConn) countError(name string, err error) error { if sc == nil || sc.srv == nil { return err } - f := sc.srv.CountError + f := sc.countErrorFunc if f == nil { return err } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 98a49c6b6..0c5f64aa8 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -25,7 +25,6 @@ import ( "net/http" "net/http/httptrace" "net/textproto" - "os" "sort" "strconv" "strings" @@ -227,40 +226,26 @@ func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (co } func (t *Transport) maxHeaderListSize() uint32 { - if t.MaxHeaderListSize == 0 { + n := int64(t.MaxHeaderListSize) + if t.t1 != nil && t.t1.MaxResponseHeaderBytes != 0 { + n = t.t1.MaxResponseHeaderBytes + if n > 0 { + n = adjustHTTP1MaxHeaderSize(n) + } + } + if n <= 0 { return 10 << 20 } - if t.MaxHeaderListSize == 0xffffffff { + if n >= 0xffffffff { return 0 } - return t.MaxHeaderListSize -} - -func (t *Transport) maxFrameReadSize() uint32 { - if t.MaxReadFrameSize == 0 { - return 0 // use the default provided by the peer - } - if t.MaxReadFrameSize < minMaxFrameSize { - return minMaxFrameSize - } - if t.MaxReadFrameSize > maxFrameSize { - return maxFrameSize - } - return t.MaxReadFrameSize + return uint32(n) } func (t *Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } -func (t *Transport) pingTimeout() time.Duration { - if t.PingTimeout == 0 { - return 15 * time.Second - } - return t.PingTimeout - -} - // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. // It returns an error if t1 has already been HTTP/2-enabled. 
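Because [8]byte is comparable, the ack test in processPing above is a single ==. A small round-trip through the exported Framer shows the payload-echo property the server relies on; an in-memory buffer stands in for the connection.

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	var buf bytes.Buffer
	fr := http2.NewFramer(&buf, &buf)

	var sent [8]byte
	rand.Read(sent[:]) // errors ignored, as in the server code above

	// writePing{data: sc.sentPingData} ultimately performs this write:
	if err := fr.WritePing(false, sent); err != nil {
		panic(err)
	}

	f, err := fr.ReadFrame()
	if err != nil {
		panic(err)
	}
	pf := f.(*http2.PingFrame)
	// Reading our own frame back: not an ack, but the payload matches,
	// which is exactly the comparison processPing makes on a real ack.
	fmt.Println("ack?", pf.IsAck(), "matches our ping?", pf.Data == sent)
}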
// @@ -370,11 +355,14 @@ type ClientConn struct { lastActive time.Time lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - peerMaxHeaderListSize uint64 - peerMaxHeaderTableSize uint32 - initialWindowSize uint32 + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + peerMaxHeaderTableSize uint32 + initialWindowSize uint32 + initialStreamRecvWindowSize int32 + readIdleTimeout time.Duration + pingTimeout time.Duration // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. @@ -499,6 +487,7 @@ func (cs *clientStream) closeReqBodyLocked() { } type stickyErrWriter struct { + group synctestGroupInterface conn net.Conn timeout time.Duration err *error @@ -508,22 +497,9 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { if *sew.err != nil { return 0, *sew.err } - for { - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout)) - } - nn, err := sew.conn.Write(p[n:]) - n += nn - if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) { - // Keep extending the deadline so long as we're making progress. - continue - } - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Time{}) - } - *sew.err = err - return n, err - } + n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p) + *sew.err = err + return n, err } // noCachedConnError is the concrete type of ErrNoCachedConn, which @@ -758,44 +734,36 @@ func (t *Transport) expectContinueTimeout() time.Duration { return t.t1.ExpectContinueTimeout } -func (t *Transport) maxDecoderHeaderTableSize() uint32 { - if v := t.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (t *Transport) maxEncoderHeaderTableSize() uint32 { - if v := t.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { return t.newClientConn(c, t.disableKeepAlives()) } func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { + conf := configFromTransport(t) cc := &ClientConn{ - t: t, - tconn: c, - readerDone: make(chan struct{}), - nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. - streams: make(map[uint32]*clientStream), - singleUse: singleUse, - wantSettingsAck: true, - pings: make(map[[8]byte]chan struct{}), - reqHeaderMu: make(chan struct{}, 1), - } + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, + maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. 
+ streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + readIdleTimeout: conf.SendPingTimeout, + pingTimeout: conf.PingTimeout, + pings: make(map[[8]byte]chan struct{}), + reqHeaderMu: make(chan struct{}, 1), + } + var group synctestGroupInterface if t.transportTestHooks != nil { t.markNewGoroutine() t.transportTestHooks.newclientconn(cc) c = cc.tconn + group = t.group } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -807,30 +775,25 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // TODO: adjust this writer size to account for frame size + // MTU + crypto/tls record padding. cc.bw = bufio.NewWriter(stickyErrWriter{ + group: group, conn: c, - timeout: t.WriteByteTimeout, + timeout: conf.WriteByteTimeout, err: &cc.werr, }) cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) - if t.maxFrameReadSize() != 0 { - cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize()) - } + cc.fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) if t.CountError != nil { cc.fr.countError = t.CountError } - maxHeaderTableSize := t.maxDecoderHeaderTableSize() + maxHeaderTableSize := conf.MaxDecoderHeaderTableSize cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil) cc.fr.MaxHeaderListSize = t.maxHeaderListSize() cc.henc = hpack.NewEncoder(&cc.hbuf) - cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) + cc.henc.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) cc.peerMaxHeaderTableSize = initialHeaderTableSize - if t.AllowHTTP { - cc.nextStreamID = 3 - } - if cs, ok := c.(connectionStater); ok { state := cs.ConnectionState() cc.tlsState = &state @@ -838,11 +801,9 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro initialSettings := []Setting{ {ID: SettingEnablePush, Val: 0}, - {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, - } - if max := t.maxFrameReadSize(); max != 0 { - initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max}) + {ID: SettingInitialWindowSize, Val: uint32(cc.initialStreamRecvWindowSize)}, } + initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: conf.MaxReadFrameSize}) if max := t.maxHeaderListSize(); max != 0 { initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) } @@ -852,8 +813,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro cc.bw.Write(clientPreface) cc.fr.WriteSettings(initialSettings...) - cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) - cc.inflow.init(transportDefaultConnFlow + initialWindowSize) + cc.fr.WriteWindowUpdate(0, uint32(conf.MaxUploadBufferPerConnection)) + cc.inflow.init(conf.MaxUploadBufferPerConnection + initialWindowSize) cc.bw.Flush() if cc.werr != nil { cc.Close() @@ -871,7 +832,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro } func (cc *ClientConn) healthCheck() { - pingTimeout := cc.t.pingTimeout() + pingTimeout := cc.pingTimeout // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. 
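Worked numbers for the preface written just above, assuming the config layer resolved MaxUploadBufferPerConnection to 1 MiB (the actual default lives in config.go, outside this diff): the WINDOW_UPDATE on stream 0 carries the whole configured buffer, and local inflow accounting tracks it on top of the spec's implicit 65535-byte window.

package main

import "fmt"

func main() {
	const initialWindowSize = 65535              // RFC 7540 implicit connection window
	const maxUploadBufferPerConnection = 1 << 20 // assumed resolved config value
	fmt.Println("WINDOW_UPDATE increment:", maxUploadBufferPerConnection)
	fmt.Println("tracked receive window: ", maxUploadBufferPerConnection+initialWindowSize)
}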
ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) @@ -2203,7 +2164,7 @@ type resAndError struct { func (cc *ClientConn) addStreamLocked(cs *clientStream) { cs.flow.add(int32(cc.initialWindowSize)) cs.flow.setConnFlow(&cc.flow) - cs.inflow.init(transportDefaultStreamFlow) + cs.inflow.init(cc.initialStreamRecvWindowSize) cs.ID = cc.nextStreamID cc.nextStreamID += 2 cc.streams[cs.ID] = cs @@ -2349,7 +2310,7 @@ func (cc *ClientConn) countReadFrameError(err error) { func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false - readIdleTimeout := cc.t.ReadIdleTimeout + readIdleTimeout := cc.readIdleTimeout var t timer if readIdleTimeout != 0 { t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index 33f61398a..6ff6bee7e 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -131,6 +131,16 @@ func (se StreamError) writeFrame(ctx writeContext) error { func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } +type writePing struct { + data [8]byte +} + +func (w writePing) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(false, w.data) +} + +func (w writePing) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.data) <= max } + type writePingAck struct{ pf *PingFrame } func (w writePingAck) writeFrame(ctx writeContext) error { diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go index 923a5780e..ac76165ce 100644 --- a/vendor/golang.org/x/net/websocket/websocket.go +++ b/vendor/golang.org/x/net/websocket/websocket.go @@ -8,7 +8,7 @@ // This package currently lacks some features found in an alternative // and more actively maintained WebSocket package: // -// https://pkg.go.dev/nhooyr.io/websocket +// https://pkg.go.dev/github.com/coder/websocket package websocket // import "golang.org/x/net/websocket" import ( diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE index 6a66aea5e..2a7cf70da 100644 --- a/vendor/golang.org/x/sync/LICENSE +++ b/vendor/golang.org/x/sync/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/sys/LICENSE b/vendor/golang.org/x/sys/LICENSE index 6a66aea5e..2a7cf70da 100644 --- a/vendor/golang.org/x/sys/LICENSE +++ b/vendor/golang.org/x/sys/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. 
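On the client side the knobs remain the same Transport fields; only their plumbing moved into configFromTransport and onto the ClientConn (readIdleTimeout, pingTimeout). An illustrative setup, noting that t1's MaxResponseHeaderBytes now also feeds SETTINGS_MAX_HEADER_LIST_SIZE via the rewritten maxHeaderListSize; all values are arbitrary.

package main

import (
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	t1 := &http.Transport{MaxResponseHeaderBytes: 64 << 10}
	t2, err := http2.ConfigureTransports(t1)
	if err != nil {
		panic(err)
	}
	t2.ReadIdleTimeout = 30 * time.Second // readLoop arms healthCheck after 30s of silence
	t2.PingTimeout = 15 * time.Second     // healthCheck gives up on an unanswered PING
	t2.MaxReadFrameSize = 1 << 20         // clamped to the legal SETTINGS_MAX_FRAME_SIZE range
	_ = &http.Client{Transport: t1}
}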
copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md index 7d3c060e1..6e08a76a7 100644 --- a/vendor/golang.org/x/sys/unix/README.md +++ b/vendor/golang.org/x/sys/unix/README.md @@ -156,7 +156,7 @@ from the generated architecture-specific files listed below, and merge these into a common file for each OS. The merge is performed in the following steps: -1. Construct the set of common code that is idential in all architecture-specific files. +1. Construct the set of common code that is identical in all architecture-specific files. 2. Write this common code to the merged file. 3. Remove the common code from all architecture-specific files. diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 4ed2e488b..ac54ecaba 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -58,6 +58,7 @@ includes_Darwin=' #define _DARWIN_USE_64_BIT_INODE #define __APPLE_USE_RFC_3542 #include +#include #include #include #include @@ -551,6 +552,7 @@ ccflags="$@" $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || $2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ || + $2 ~ /^(CONNECT|SAE)_/ || $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || @@ -654,7 +656,7 @@ errors=$( signals=$( echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort ) @@ -664,7 +666,7 @@ echo '#include ' | $CC -x c - -E -dM $ccflags | sort >_error.grep echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort >_signal.grep echo '// mkerrors.sh' "$@" diff --git a/vendor/golang.org/x/sys/unix/mremap.go b/vendor/golang.org/x/sys/unix/mremap.go index fd45fe529..3a5e776f8 100644 --- a/vendor/golang.org/x/sys/unix/mremap.go +++ b/vendor/golang.org/x/sys/unix/mremap.go @@ -50,3 +50,8 @@ func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data [ func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { return mapper.Mremap(oldData, newLength, flags) } + +func MremapPtr(oldAddr unsafe.Pointer, oldSize uintptr, newAddr unsafe.Pointer, newSize uintptr, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mremap(uintptr(oldAddr), oldSize, newSize, flags, uintptr(newAddr)) + return unsafe.Pointer(xaddr), err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 67ce6cef2..6f15ba1ea 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -360,7 +360,7 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, var status _C_int var r Pid_t err = ERESTART - // AIX wait4 may return with ERESTART errno, while the processus is still + // AIX wait4 may return 
with ERESTART errno, while the process is still // active. for err == ERESTART { r, err = wait4(Pid_t(pid), &status, options, rusage) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 59542a897..099867dee 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -402,6 +402,18 @@ func IoctlSetIfreqMTU(fd int, ifreq *IfreqMTU) error { return ioctlPtr(fd, SIOCSIFMTU, unsafe.Pointer(ifreq)) } +//sys renamexNp(from string, to string, flag uint32) (err error) + +func RenamexNp(from string, to string, flag uint32) (err error) { + return renamexNp(from, to, flag) +} + +//sys renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) + +func RenameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) { + return renameatxNp(fromfd, from, tofd, to, flag) +} + //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL func Uname(uname *Utsname) error { @@ -542,6 +554,55 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { } } +//sys pthread_chdir_np(path string) (err error) + +func PthreadChdir(path string) (err error) { + return pthread_chdir_np(path) +} + +//sys pthread_fchdir_np(fd int) (err error) + +func PthreadFchdir(fd int) (err error) { + return pthread_fchdir_np(fd) +} + +// Connectx calls connectx(2) to initiate a connection on a socket. +// +// srcIf, srcAddr, and dstAddr are filled into a [SaEndpoints] struct and passed as the endpoints argument. +// +// - srcIf is the optional source interface index. 0 means unspecified. +// - srcAddr is the optional source address. nil means unspecified. +// - dstAddr is the destination address. +// +// On success, Connectx returns the number of bytes enqueued for transmission. 
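The new Darwin wrappers are thin exports over non-portable libc entry points. PthreadChdir in particular changes the working directory of the calling thread only, so a caller has to pin its goroutine to one OS thread; a hedged sketch:

//go:build darwin

package main

import (
	"runtime"

	"golang.org/x/sys/unix"
)

func main() {
	// pthread_chdir_np is per-thread: keep this goroutine on one OS thread
	// for as long as the thread-local working directory matters.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	if err := unix.PthreadChdir("/tmp"); err != nil {
		panic(err)
	}

	// Restore via the fd-based variant.
	fd, err := unix.Open("/", unix.O_RDONLY, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)
	if err := unix.PthreadFchdir(fd); err != nil {
		panic(err)
	}
}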
+func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocID, flags uint32, iov []Iovec, connid *SaeConnID) (n uintptr, err error) {
+	endpoints := SaEndpoints{
+		Srcif: srcIf,
+	}
+
+	if srcAddr != nil {
+		addrp, addrlen, err := srcAddr.sockaddr()
+		if err != nil {
+			return 0, err
+		}
+		endpoints.Srcaddr = (*RawSockaddr)(addrp)
+		endpoints.Srcaddrlen = uint32(addrlen)
+	}
+
+	if dstAddr != nil {
+		addrp, addrlen, err := dstAddr.sockaddr()
+		if err != nil {
+			return 0, err
+		}
+		endpoints.Dstaddr = (*RawSockaddr)(addrp)
+		endpoints.Dstaddrlen = uint32(addrlen)
+	}
+
+	err = connectx(fd, &endpoints, associd, flags, iov, &n, connid)
+	return
+}
+
+//sys	connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error)
 //sys	sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error)
 
 //sys	shmat(id int, addr uintptr, flag int) (ret uintptr, err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go
index ba46651f8..a6a2d2fc2 100644
--- a/vendor/golang.org/x/sys/unix/syscall_hurd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go
@@ -11,6 +11,7 @@ package unix
 int ioctl(int, unsigned long int, uintptr_t);
 */
 import "C"
+import "unsafe"
 
 func ioctl(fd int, req uint, arg uintptr) (err error) {
 	r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg))
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index 5682e2628..f08abd434 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -1295,6 +1295,48 @@ func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) {
 	return &value, err
 }
 
+// GetsockoptTCPCCVegasInfo returns algorithm specific congestion control information for a socket using the "vegas"
+// algorithm.
+//
+// The socket's congestion control algorithm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
+//
+//	algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
+func GetsockoptTCPCCVegasInfo(fd, level, opt int) (*TCPVegasInfo, error) {
+	var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
+	vallen := _Socklen(SizeofTCPCCInfo)
+	err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
+	out := (*TCPVegasInfo)(unsafe.Pointer(&value[0]))
+	return out, err
+}
+
+// GetsockoptTCPCCDCTCPInfo returns algorithm specific congestion control information for a socket using the "dctcp"
+// algorithm.
+//
+// The socket's congestion control algorithm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
+//
+//	algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
+func GetsockoptTCPCCDCTCPInfo(fd, level, opt int) (*TCPDCTCPInfo, error) {
+	var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
+	vallen := _Socklen(SizeofTCPCCInfo)
+	err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
+	out := (*TCPDCTCPInfo)(unsafe.Pointer(&value[0]))
+	return out, err
+}
+
+// GetsockoptTCPCCBBRInfo returns algorithm specific congestion control information for a socket using the "bbr"
+// algorithm.
+//
+// The socket's congestion control algorithm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
+//
+//	algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
+func GetsockoptTCPCCBBRInfo(fd, level, opt int) (*TCPBBRInfo, error) {
+	var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
+	vallen := _Socklen(SizeofTCPCCInfo)
+	err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
+	out := (*TCPBBRInfo)(unsafe.Pointer(&value[0]))
+	return out, err
+}
+
 // GetsockoptString returns the string value of the socket option opt for the
 // socket associated with fd at the given socket level.
 func GetsockoptString(fd, level, opt int) (string, error) {
@@ -1959,7 +2001,26 @@ func Getpgrp() (pid int) {
 //sysnb	Getpid() (pid int)
 //sysnb	Getppid() (ppid int)
 //sys	Getpriority(which int, who int) (prio int, err error)
-//sys	Getrandom(buf []byte, flags int) (n int, err error)
+
+func Getrandom(buf []byte, flags int) (n int, err error) {
+	vdsoRet, supported := vgetrandom(buf, uint32(flags))
+	if supported {
+		if vdsoRet < 0 {
+			return 0, errnoErr(syscall.Errno(-vdsoRet))
+		}
+		return vdsoRet, nil
+	}
+	var p *byte
+	if len(buf) > 0 {
+		p = &buf[0]
+	}
+	r, _, e := Syscall(SYS_GETRANDOM, uintptr(unsafe.Pointer(p)), uintptr(len(buf)), uintptr(flags))
+	if e != 0 {
+		return 0, errnoErr(e)
+	}
+	return int(r), nil
+}
+
 //sysnb	Getrusage(who int, rusage *Rusage) (err error)
 //sysnb	Getsid(pid int) (sid int, err error)
 //sysnb	Gettid() (tid int)
@@ -2592,3 +2653,4 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) {
 }
 
 //sys	Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error)
+//sys	Mseal(b []byte, flags uint) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
index cf2ee6c75..745e5c7e6 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
@@ -182,3 +182,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error
 	}
 	return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
 }
+
+const SYS_FSTATAT = SYS_NEWFSTATAT
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
index 3d0e98451..dd2262a40 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
@@ -214,3 +214,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error
 	}
 	return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
 }
+
+const SYS_FSTATAT = SYS_NEWFSTATAT
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
index 6f5a28894..8cf3670bd 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
@@ -187,3 +187,5 @@ func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error
 	}
 	return riscvHWProbe(pairs, setSize, set, flags)
 }
+
+const SYS_FSTATAT = SYS_NEWFSTATAT
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go
index b25343c71..b86ded549 100644
--- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go
@@ -293,6 +293,7 @@ func Uname(uname *Utsname) error {
 //sys	Mkfifoat(dirfd int, path string, mode uint32) (err
error) //sys Mknod(path string, mode uint32, dev int) (err error) //sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) +//sys Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 77081de8c..4e92e5aa4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -154,6 +154,15 @@ func Munmap(b []byte) (err error) { return mapper.Munmap(b) } +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset) + return unsafe.Pointer(xaddr), err +} + +func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) { + return mapper.munmap(uintptr(addr), length) +} + func Read(fd int, p []byte) (n int, err error) { n, err = read(fd, p) if raceenabled { diff --git a/vendor/golang.org/x/sys/unix/vgetrandom_linux.go b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go new file mode 100644 index 000000000..07ac8e09d --- /dev/null +++ b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go @@ -0,0 +1,13 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && go1.24 + +package unix + +import _ "unsafe" + +//go:linkname vgetrandom runtime.vgetrandom +//go:noescape +func vgetrandom(p []byte, flags uint32) (ret int, supported bool) diff --git a/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go new file mode 100644 index 000000000..297e97bce --- /dev/null +++ b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go @@ -0,0 +1,11 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
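With the vgetrandom pair above, the build tags select the vDSO fast path only on linux && go1.24 (the //go:linkname depends on runtime support added in Go 1.24); everywhere else the stub reports unsupported and Getrandom falls back to the getrandom(2) syscall. Call sites are unchanged; a minimal sketch:

//go:build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	buf := make([]byte, 16)
	// Transparently uses vgetrandom via the vDSO when available, otherwise
	// the getrandom(2) syscall; the caller cannot tell the difference.
	n, err := unix.Getrandom(buf, 0)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d random bytes: %x\n", n, buf)
}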
+ +//go:build !linux || !go1.24 + +package unix + +func vgetrandom(p []byte, flags uint32) (ret int, supported bool) { + return -1, false +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index e40fa8524..d73c4652e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1169,6 +1172,11 @@ const ( PT_WRITE_D = 0x5 PT_WRITE_I = 0x4 PT_WRITE_U = 0x6 + RENAME_EXCL = 0x4 + RENAME_NOFOLLOW_ANY = 0x10 + RENAME_RESERVED1 = 0x8 + RENAME_SECLUDE = 0x1 + RENAME_SWAP = 0x2 RLIMIT_AS = 0x5 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1260,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index bb02aa6c0..4a55a4005 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1169,6 +1172,11 @@ const ( PT_WRITE_D = 0x5 PT_WRITE_I = 0x4 PT_WRITE_U = 0x6 + RENAME_EXCL = 0x4 + RENAME_NOFOLLOW_ANY = 0x10 + RENAME_RESERVED1 = 0x8 + RENAME_SECLUDE = 0x1 + RENAME_SWAP = 0x2 RLIMIT_AS = 0x5 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1260,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 877a62b47..de3b46248 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -457,6 +457,7 @@ const ( B600 = 0x8 B75 = 0x2 B9600 = 0xd + BCACHEFS_SUPER_MAGIC = 0xca451a4e BDEVFS_MAGIC = 0x62646576 BINDERFS_SUPER_MAGIC = 0x6c6f6f70 BINFMTFS_MAGIC = 0x42494e4d @@ -494,6 +495,7 @@ const ( BPF_F_TEST_REG_INVARIANTS = 0x80 BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 + BPF_F_TEST_SKB_CHECKSUM_COMPLETE = 0x4 BPF_F_TEST_STATE_FREQ = 0x8 BPF_F_TEST_XDP_LIVE_FRAMES = 0x2 BPF_F_XDP_DEV_BOUND_ONLY = 0x40 @@ -928,6 +930,7 @@ const ( EPOLL_CTL_ADD = 0x1 EPOLL_CTL_DEL = 0x2 EPOLL_CTL_MOD = 0x3 + EPOLL_IOC_TYPE = 0x8a EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2 ESP_V4_FLOW = 0xa ESP_V6_FLOW = 0xc @@ -941,9 +944,6 @@ const ( ETHTOOL_FEC_OFF = 0x4 ETHTOOL_FEC_RS = 0x8 ETHTOOL_FLAG_ALL = 0x7 - ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 - ETHTOOL_FLAG_OMIT_REPLY = 0x2 - ETHTOOL_FLAG_STATS = 0x4 ETHTOOL_FLASHDEV = 0x33 ETHTOOL_FLASH_MAX_FILENAME = 0x80 ETHTOOL_FWVERS_LEN = 0x20 @@ -1705,6 +1705,7 @@ const ( KEXEC_ARCH_S390 = 0x160000 KEXEC_ARCH_SH = 0x2a0000 KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_CRASH_HOTPLUG_SUPPORT = 0x8 KEXEC_FILE_DEBUG = 0x8 KEXEC_FILE_NO_INITRAMFS = 0x4 KEXEC_FILE_ON_CRASH = 0x2 @@ -1780,6 
+1781,7 @@ const ( KEY_SPEC_USER_KEYRING = -0x4 KEY_SPEC_USER_SESSION_KEYRING = -0x5 LANDLOCK_ACCESS_FS_EXECUTE = 0x1 + LANDLOCK_ACCESS_FS_IOCTL_DEV = 0x8000 LANDLOCK_ACCESS_FS_MAKE_BLOCK = 0x800 LANDLOCK_ACCESS_FS_MAKE_CHAR = 0x40 LANDLOCK_ACCESS_FS_MAKE_DIR = 0x80 @@ -1861,6 +1863,19 @@ const ( MAP_FILE = 0x0 MAP_FIXED = 0x10 MAP_FIXED_NOREPLACE = 0x100000 + MAP_HUGE_16GB = 0x88000000 + MAP_HUGE_16KB = 0x38000000 + MAP_HUGE_16MB = 0x60000000 + MAP_HUGE_1GB = 0x78000000 + MAP_HUGE_1MB = 0x50000000 + MAP_HUGE_256MB = 0x70000000 + MAP_HUGE_2GB = 0x7c000000 + MAP_HUGE_2MB = 0x54000000 + MAP_HUGE_32MB = 0x64000000 + MAP_HUGE_512KB = 0x4c000000 + MAP_HUGE_512MB = 0x74000000 + MAP_HUGE_64KB = 0x40000000 + MAP_HUGE_8MB = 0x5c000000 MAP_HUGE_MASK = 0x3f MAP_HUGE_SHIFT = 0x1a MAP_PRIVATE = 0x2 @@ -1908,6 +1923,7 @@ const ( MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 MNT_ID_REQ_SIZE_VER0 = 0x18 + MNT_ID_REQ_SIZE_VER1 = 0x20 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -2173,7 +2189,7 @@ const ( NFT_REG_SIZE = 0x10 NFT_REJECT_ICMPX_MAX = 0x3 NFT_RT_MAX = 0x4 - NFT_SECMARK_CTX_MAXLEN = 0x100 + NFT_SECMARK_CTX_MAXLEN = 0x1000 NFT_SET_MAXNAMELEN = 0x100 NFT_SOCKET_MAX = 0x3 NFT_TABLE_F_MASK = 0x7 @@ -2342,9 +2358,11 @@ const ( PERF_MEM_LVLNUM_IO = 0xa PERF_MEM_LVLNUM_L1 = 0x1 PERF_MEM_LVLNUM_L2 = 0x2 + PERF_MEM_LVLNUM_L2_MHB = 0x5 PERF_MEM_LVLNUM_L3 = 0x3 PERF_MEM_LVLNUM_L4 = 0x4 PERF_MEM_LVLNUM_LFB = 0xc + PERF_MEM_LVLNUM_MSC = 0x6 PERF_MEM_LVLNUM_NA = 0xf PERF_MEM_LVLNUM_PMEM = 0xe PERF_MEM_LVLNUM_RAM = 0xd @@ -2417,6 +2435,7 @@ const ( PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 + PROCFS_IOCTL_MAGIC = 'f' PROC_SUPER_MAGIC = 0x9fa0 PROT_EXEC = 0x4 PROT_GROWSDOWN = 0x1000000 @@ -2498,6 +2517,23 @@ const ( PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 PR_PAC_SET_ENABLED_KEYS = 0x3c + PR_PPC_DEXCR_CTRL_CLEAR = 0x4 + PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10 + PR_PPC_DEXCR_CTRL_EDITABLE = 0x1 + PR_PPC_DEXCR_CTRL_MASK = 0x1f + PR_PPC_DEXCR_CTRL_SET = 0x2 + PR_PPC_DEXCR_CTRL_SET_ONEXEC = 0x8 + PR_PPC_DEXCR_IBRTPD = 0x1 + PR_PPC_DEXCR_NPHIE = 0x3 + PR_PPC_DEXCR_SBHE = 0x0 + PR_PPC_DEXCR_SRAPD = 0x2 + PR_PPC_GET_DEXCR = 0x48 + PR_PPC_SET_DEXCR = 0x49 + PR_RISCV_CTX_SW_FENCEI_OFF = 0x1 + PR_RISCV_CTX_SW_FENCEI_ON = 0x0 + PR_RISCV_SCOPE_PER_PROCESS = 0x0 + PR_RISCV_SCOPE_PER_THREAD = 0x1 + PR_RISCV_SET_ICACHE_FLUSH_CTX = 0x47 PR_RISCV_V_GET_CONTROL = 0x46 PR_RISCV_V_SET_CONTROL = 0x45 PR_RISCV_V_VSTATE_CTRL_CUR_MASK = 0x3 @@ -2902,11 +2938,12 @@ const ( RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 RWF_APPEND = 0x10 + RWF_ATOMIC = 0x40 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x3f + RWF_SUPPORTED = 0x7f RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 @@ -3179,6 +3216,7 @@ const ( STATX_ATTR_MOUNT_ROOT = 0x2000 STATX_ATTR_NODUMP = 0x40 STATX_ATTR_VERITY = 0x100000 + STATX_ATTR_WRITE_ATOMIC = 0x400000 STATX_BASIC_STATS = 0x7ff STATX_BLOCKS = 0x400 STATX_BTIME = 0x800 @@ -3192,8 +3230,10 @@ const ( STATX_MTIME = 0x40 STATX_NLINK = 0x4 STATX_SIZE = 0x200 + STATX_SUBVOL = 0x8000 STATX_TYPE = 0x1 STATX_UID = 0x8 + STATX_WRITE_ATOMIC = 0x10000 STATX__RESERVED = 0x80000000 SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 @@ -3592,6 +3632,7 @@ const ( XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 XDP_UMEM_PGOFF_FILL_RING = 0x100000000 XDP_UMEM_REG = 0x4 + XDP_UMEM_TX_METADATA_LEN = 0x4 XDP_UMEM_TX_SW_CSUM = 0x2 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index e4bc0bd57..8aa6d77c0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -151,9 +153,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 689317afd..da428f425 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -151,9 +153,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 5cca668ac..bf45bfec7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -148,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 14270508b..71c67162b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 ESR_MAGIC = 0x45535201 EXTPROC = 0x10000 @@ -152,9 +154,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 28e39afdc..9476628fa 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -152,9 +154,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index cd66e92cb..b9e85f3cf 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -148,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index c1595eba7..a48b68a76 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -148,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index ee9456b0d..ea00e8522 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -148,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 8cfca81e1..91c646871 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 
0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -148,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 60b0deb3a..8cbf38d63 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 @@ -150,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index f90aa7281..a2df73419 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 @@ -150,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index ba9e01503..247913792 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 @@ -150,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 07cdfd6e9..d265f146e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -148,9 +150,14 @@ const ( NFDBITS = 
0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 2f1dd214a..3f2d64439 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -148,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index f40519d90..5d8b727a1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -82,6 +82,8 @@ const ( EFD_CLOEXEC = 0x400000 EFD_NONBLOCK = 0x4000 EMT_TAGOVF = 0x1 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x400000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -153,9 +155,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index da08b2ab3..1ec2b1407 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -581,6 +581,8 @@ const ( AT_EMPTY_PATH = 0x1000 AT_REMOVEDIR = 0x200 RENAME_NOREPLACE = 1 << 0 + ST_RDONLY = 1 + ST_NOSUID = 2 ) const ( diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index ccb02f240..24b346e1a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -740,6 +740,54 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func renamexNp(from string, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_renamex_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renamex_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renamex_np renamex_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) 
(err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_renameatx_np_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), uintptr(flag), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renameatx_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameatx_np renameatx_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -760,6 +808,59 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 8b8bb2840..ebd213100 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -223,11 +223,36 @@ TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) +TEXT libc_renamex_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renamex_np(SB) +GLOBL ·libc_renamex_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renamex_np_trampoline_addr(SB)/8, $libc_renamex_np_trampoline<>(SB) + +TEXT 
libc_renameatx_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameatx_np(SB) +GLOBL ·libc_renameatx_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameatx_np_trampoline_addr(SB)/8, $libc_renameatx_np_trampoline<>(SB) + TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 1b40b997b..824b9c2d5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -740,6 +740,54 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func renamexNp(from string, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_renamex_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renamex_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renamex_np renamex_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_renameatx_np_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), uintptr(flag), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renameatx_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameatx_np renameatx_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -760,6 +808,59 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var 
libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 08362c1ab..4f178a229 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -223,11 +223,36 @@ TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) +TEXT libc_renamex_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renamex_np(SB) +GLOBL ·libc_renamex_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renamex_np_trampoline_addr(SB)/8, $libc_renamex_np_trampoline<>(SB) + +TEXT libc_renameatx_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameatx_np(SB) +GLOBL ·libc_renameatx_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameatx_np_trampoline_addr(SB)/8, $libc_renameatx_np_trampoline<>(SB) + TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL 
·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 87d8612a1..af30da557 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -971,23 +971,6 @@ func Getpriority(which int, who int) (prio int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { @@ -2229,3 +2212,19 @@ func Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mseal(b []byte, flags uint) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSEAL, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 9dc42410b..1851df14e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 41b561731..0b43c6936 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mount_trampoline_addr(SB)/4, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 diff --git 
a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 0d3a0751c..e1ec0dbe4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 4019a656f..880c6d6e3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index c39f7776d..7c8452a63 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index ac4af24f9..b8ef95b0f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -463,6 +463,11 @@ TEXT 
libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mount_trampoline_addr(SB)/4, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 57571d072..2ffdf861f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index f77d53212..2af3b5c76 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index e62963e67..1da08d526 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index fae140b62..b7a251353 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 00831354c..6e85b0aac 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index 9d1e0ff06..f15dadf05 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -555,6 +555,12 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mount(SB) + RET +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_nanosleep(SB) RET diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 79029ed58..28b487df2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index da115f9a4..1e7f321e4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 53aef5dc5..524b0820c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -457,4 +457,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 71d524763..f485dbf45 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -341,6 +341,7 @@ const ( SYS_STATX = 332 SYS_IO_PGETEVENTS = 333 SYS_RSEQ = 334 + SYS_URETPROBE = 335 SYS_PIDFD_SEND_SIGNAL = 424 SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 @@ -379,4 +380,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index c74770613..70b35bf3b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -421,4 +421,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index f96e214f6..1893e2fe8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -85,7 +85,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 @@ -324,4 +324,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 28425346c..16a4017da 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -84,6 +84,8 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 + SYS_NEWFSTATAT = 79 + SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 SYS_FDATASYNC = 83 @@ -318,4 +320,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index d0953018d..7e567f1ef 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -441,4 +441,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 4459 SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 + SYS_MSEAL = 4462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 295c7f4b8..38ae55e5e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -371,4 +371,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 5459 SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 + SYS_MSEAL = 5462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index d1a9eaca7..55e92e60a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -371,4 +371,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 5459 SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 + SYS_MSEAL = 5462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index bec157c39..60658d6a0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -441,4 +441,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 4459 SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 + SYS_MSEAL = 4462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 7ee7bdc43..e203e8a7e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -448,4 +448,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index fad1f25b4..5944b97d5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -420,4 +420,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 7d3e16357..c66d416da 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -420,4 +420,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 0ed53ad9f..a5459e766 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go 
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -84,7 +84,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 @@ -325,4 +325,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 2fba04ad5..01d86825b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -386,4 +386,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 621d00d74..7b703e77c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -399,4 +399,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 091d107f3..d003c3d43 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 28ff4ef74..0d45a941a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 6cbd094a3..51e13eb05 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -625,6 +625,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 7c03b6ee7..d002d8ef3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -630,6 +630,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 422107ee8..3f863d898 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -616,6 +616,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git 
a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 505a12acf..61c729310 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -610,6 +610,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index cc986c790..b5d17414f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -612,6 +612,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 4740b8348..3a69e4549 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -87,30 +87,35 @@ type StatxTimestamp struct { } type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - Mnt_id uint64 - Dio_mem_align uint32 - Dio_offset_align uint32 - _ [12]uint64 + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + Mnt_id uint64 + Dio_mem_align uint32 + Dio_offset_align uint32 + Subvol uint64 + Atomic_write_unit_min uint32 + Atomic_write_unit_max uint32 + Atomic_write_segments_max uint32 + _ [1]uint32 + _ [9]uint64 } type Fsid struct { @@ -515,6 +520,29 @@ type TCPInfo struct { Total_rto_time uint32 } +type TCPVegasInfo struct { + Enabled uint32 + Rttcnt uint32 + Rtt uint32 + Minrtt uint32 +} + +type TCPDCTCPInfo struct { + Enabled uint16 + Ce_state uint16 + Alpha uint32 + Ab_ecn uint32 + Ab_tot uint32 +} + +type TCPBBRInfo struct { + Bw_lo uint32 + Bw_hi uint32 + Min_rtt uint32 + Pacing_gain uint32 + Cwnd_gain uint32 +} + type CanFilter struct { Id uint32 Mask uint32 @@ -556,6 +584,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0xf8 + SizeofTCPCCInfo = 0x14 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -2485,7 +2514,7 @@ type XDPMmapOffsets struct { type XDPUmemReg struct { Addr uint64 Len uint64 - Chunk_size uint32 + Size uint32 Headroom uint32 Flags uint32 Tx_metadata_len uint32 @@ -3473,7 +3502,7 @@ const ( DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 DEVLINK_PORT_FN_ATTR_CAPS = 0x4 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x6 ) type FsverityDigest struct { @@ -3765,7 +3794,7 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2b + ETHTOOL_MSG_USER_MAX = 0x2c ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3805,7 +3834,10 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 
ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2b + ETHTOOL_MSG_KERNEL_MAX = 0x2c + ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 + ETHTOOL_FLAG_OMIT_REPLY = 0x2 + ETHTOOL_FLAG_STATS = 0x4 ETHTOOL_A_HEADER_UNSPEC = 0x0 ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 @@ -3947,7 +3979,7 @@ const ( ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17 ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18 ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19 - ETHTOOL_A_COALESCE_MAX = 0x1c + ETHTOOL_A_COALESCE_MAX = 0x1e ETHTOOL_A_PAUSE_UNSPEC = 0x0 ETHTOOL_A_PAUSE_HEADER = 0x1 ETHTOOL_A_PAUSE_AUTONEG = 0x2 @@ -3975,7 +4007,7 @@ const ( ETHTOOL_A_TSINFO_TX_TYPES = 0x3 ETHTOOL_A_TSINFO_RX_FILTERS = 0x4 ETHTOOL_A_TSINFO_PHC_INDEX = 0x5 - ETHTOOL_A_TSINFO_MAX = 0x5 + ETHTOOL_A_TSINFO_MAX = 0x6 ETHTOOL_A_CABLE_TEST_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_HEADER = 0x1 ETHTOOL_A_CABLE_TEST_MAX = 0x1 @@ -4605,7 +4637,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14a + NL80211_ATTR_MAX = 0x14c NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -5209,7 +5241,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x20 + NL80211_FREQUENCY_ATTR_MAX = 0x21 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 15adc0414..ad05b51a6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -727,6 +727,37 @@ const ( RISCV_HWPROBE_EXT_ZBA = 0x8 RISCV_HWPROBE_EXT_ZBB = 0x10 RISCV_HWPROBE_EXT_ZBS = 0x20 + RISCV_HWPROBE_EXT_ZICBOZ = 0x40 + RISCV_HWPROBE_EXT_ZBC = 0x80 + RISCV_HWPROBE_EXT_ZBKB = 0x100 + RISCV_HWPROBE_EXT_ZBKC = 0x200 + RISCV_HWPROBE_EXT_ZBKX = 0x400 + RISCV_HWPROBE_EXT_ZKND = 0x800 + RISCV_HWPROBE_EXT_ZKNE = 0x1000 + RISCV_HWPROBE_EXT_ZKNH = 0x2000 + RISCV_HWPROBE_EXT_ZKSED = 0x4000 + RISCV_HWPROBE_EXT_ZKSH = 0x8000 + RISCV_HWPROBE_EXT_ZKT = 0x10000 + RISCV_HWPROBE_EXT_ZVBB = 0x20000 + RISCV_HWPROBE_EXT_ZVBC = 0x40000 + RISCV_HWPROBE_EXT_ZVKB = 0x80000 + RISCV_HWPROBE_EXT_ZVKG = 0x100000 + RISCV_HWPROBE_EXT_ZVKNED = 0x200000 + RISCV_HWPROBE_EXT_ZVKNHA = 0x400000 + RISCV_HWPROBE_EXT_ZVKNHB = 0x800000 + RISCV_HWPROBE_EXT_ZVKSED = 0x1000000 + RISCV_HWPROBE_EXT_ZVKSH = 0x2000000 + RISCV_HWPROBE_EXT_ZVKT = 0x4000000 + RISCV_HWPROBE_EXT_ZFH = 0x8000000 + RISCV_HWPROBE_EXT_ZFHMIN = 0x10000000 + RISCV_HWPROBE_EXT_ZIHINTNTL = 0x20000000 + RISCV_HWPROBE_EXT_ZVFH = 0x40000000 + RISCV_HWPROBE_EXT_ZVFHMIN = 0x80000000 + RISCV_HWPROBE_EXT_ZFA = 0x100000000 + RISCV_HWPROBE_EXT_ZTSO = 0x200000000 + RISCV_HWPROBE_EXT_ZACAS = 0x400000000 + RISCV_HWPROBE_EXT_ZICOND = 0x800000000 + RISCV_HWPROBE_EXT_ZIHINTPAUSE = 0x1000000000 RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5 RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0 RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1 @@ -734,4 +765,6 @@ const ( RISCV_HWPROBE_MISALIGNED_FAST = 0x3 RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4 RISCV_HWPROBE_MISALIGNED_MASK = 0x7 + RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE = 0x6 + RISCV_HWPROBE_WHICH_CPUS = 0x1 ) diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index 115341fba..4e613cf63 100644 --- 
a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -65,7 +65,7 @@ func LoadDLL(name string) (dll *DLL, err error) { return d, nil } -// MustLoadDLL is like LoadDLL but panics if load operation failes. +// MustLoadDLL is like LoadDLL but panics if load operation fails. func MustLoadDLL(name string) *DLL { d, e := LoadDLL(name) if e != nil { diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 6f7d2ac70..b6e1ab76f 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -894,7 +894,7 @@ type ACL struct { aclRevision byte sbz1 byte aclSize uint16 - aceCount uint16 + AceCount uint16 sbz2 uint16 } @@ -1087,6 +1087,27 @@ type EXPLICIT_ACCESS struct { Trustee TRUSTEE } +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header +type ACE_HEADER struct { + AceType uint8 + AceFlags uint8 + AceSize uint16 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-access_allowed_ace +type ACCESS_ALLOWED_ACE struct { + Header ACE_HEADER + Mask ACCESS_MASK + SidStart uint32 +} + +const ( + // Constants for AceType + // https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header + ACCESS_ALLOWED_ACE_TYPE = 0 + ACCESS_DENIED_ACE_TYPE = 1 +) + // This type is the union inside of TRUSTEE and must be created using one of the TrusteeValueFrom* functions. type TrusteeValue uintptr @@ -1158,6 +1179,7 @@ type OBJECTS_AND_NAME struct { //sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD //sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW +//sys GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) = advapi32.GetAce // Control returns the security descriptor control bits. 
func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) { diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 6525c62f3..5cee9a314 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -17,8 +17,10 @@ import ( "unsafe" ) -type Handle uintptr -type HWND uintptr +type ( + Handle uintptr + HWND uintptr +) const ( InvalidHandle = ^Handle(0) @@ -211,6 +213,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) //sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) [failretval<=32] = shell32.ShellExecuteW //sys GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) = user32.GetWindowThreadProcessId +//sys LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) [failretval==0] = user32.LoadKeyboardLayoutW +//sys UnloadKeyboardLayout(hkl Handle) (err error) = user32.UnloadKeyboardLayout +//sys GetKeyboardLayout(tid uint32) (hkl Handle) = user32.GetKeyboardLayout +//sys ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) = user32.ToUnicodeEx //sys GetShellWindow() (shellWindow HWND) = user32.GetShellWindow //sys MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW //sys ExitWindowsEx(flags uint32, reason uint32) (err error) = user32.ExitWindowsEx @@ -307,6 +313,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo //sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition +//sys GetConsoleCP() (cp uint32, err error) = kernel32.GetConsoleCP +//sys GetConsoleOutputCP() (cp uint32, err error) = kernel32.GetConsoleOutputCP +//sys SetConsoleCP(cp uint32) (err error) = kernel32.SetConsoleCP +//sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW //sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole @@ -1368,9 +1378,11 @@ func SetsockoptLinger(fd Handle, level, opt int, l *Linger) (err error) { func SetsockoptInet4Addr(fd Handle, level, opt int, value [4]byte) (err error) { return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&value[0])), 4) } + func SetsockoptIPMreq(fd Handle, level, opt int, mreq *IPMreq) (err error) { return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(mreq)), int32(unsafe.Sizeof(*mreq))) } + func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) { return syscall.EWINDOWS } diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index d8cb71db0..7b97a154c 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ 
b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1060,6 +1060,7 @@ const ( SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6 SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4 SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12 + SIO_UDP_NETRESET = IOC_IN | IOC_VENDOR | 15 // cf. http://support.microsoft.com/default.aspx?scid=kb;en-us;257460 @@ -2003,7 +2004,21 @@ const ( MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20 ) -const GAA_FLAG_INCLUDE_PREFIX = 0x00000010 +// Flags for GetAdaptersAddresses, see +// https://learn.microsoft.com/en-us/windows/win32/api/iphlpapi/nf-iphlpapi-getadaptersaddresses. +const ( + GAA_FLAG_SKIP_UNICAST = 0x1 + GAA_FLAG_SKIP_ANYCAST = 0x2 + GAA_FLAG_SKIP_MULTICAST = 0x4 + GAA_FLAG_SKIP_DNS_SERVER = 0x8 + GAA_FLAG_INCLUDE_PREFIX = 0x10 + GAA_FLAG_SKIP_FRIENDLY_NAME = 0x20 + GAA_FLAG_INCLUDE_WINS_INFO = 0x40 + GAA_FLAG_INCLUDE_GATEWAYS = 0x80 + GAA_FLAG_INCLUDE_ALL_INTERFACES = 0x100 + GAA_FLAG_INCLUDE_ALL_COMPARTMENTS = 0x200 + GAA_FLAG_INCLUDE_TUNNEL_BINDINGORDER = 0x400 +) const ( IF_TYPE_OTHER = 1 @@ -2017,6 +2032,50 @@ const ( IF_TYPE_IEEE1394 = 144 ) +// Enum NL_PREFIX_ORIGIN for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_prefix_origin +const ( + IpPrefixOriginOther = 0 + IpPrefixOriginManual = 1 + IpPrefixOriginWellKnown = 2 + IpPrefixOriginDhcp = 3 + IpPrefixOriginRouterAdvertisement = 4 + IpPrefixOriginUnchanged = 1 << 4 +) + +// Enum NL_SUFFIX_ORIGIN for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_suffix_origin +const ( + NlsoOther = 0 + NlsoManual = 1 + NlsoWellKnown = 2 + NlsoDhcp = 3 + NlsoLinkLayerAddress = 4 + NlsoRandom = 5 + IpSuffixOriginOther = 0 + IpSuffixOriginManual = 1 + IpSuffixOriginWellKnown = 2 + IpSuffixOriginDhcp = 3 + IpSuffixOriginLinkLayerAddress = 4 + IpSuffixOriginRandom = 5 + IpSuffixOriginUnchanged = 1 << 4 +) + +// Enum NL_DAD_STATE for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_dad_state +const ( + NldsInvalid = 0 + NldsTentative = 1 + NldsDuplicate = 2 + NldsDeprecated = 3 + NldsPreferred = 4 + IpDadStateInvalid = 0 + IpDadStateTentative = 1 + IpDadStateDuplicate = 2 + IpDadStateDeprecated = 3 + IpDadStatePreferred = 4 +) + type SocketAddress struct { Sockaddr *syscall.RawSockaddrAny SockaddrLength int32 @@ -3404,3 +3463,14 @@ type DCB struct { EvtChar byte wReserved1 uint16 } + +// Keyboard Layout Flags. 
+// See https://learn.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-loadkeyboardlayoutw +const ( + KLF_ACTIVATE = 0x00000001 + KLF_SUBSTITUTE_OK = 0x00000002 + KLF_REORDER = 0x00000008 + KLF_REPLACELANG = 0x00000010 + KLF_NOTELLSHELL = 0x00000080 + KLF_SETFORPROCESS = 0x00000100 +) diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 9f73df75b..4c2e1bdc0 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -91,6 +91,7 @@ var ( procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") procEqualSid = modadvapi32.NewProc("EqualSid") procFreeSid = modadvapi32.NewProc("FreeSid") + procGetAce = modadvapi32.NewProc("GetAce") procGetLengthSid = modadvapi32.NewProc("GetLengthSid") procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") procGetSecurityDescriptorControl = modadvapi32.NewProc("GetSecurityDescriptorControl") @@ -246,7 +247,9 @@ var ( procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") + procGetConsoleCP = modkernel32.NewProc("GetConsoleCP") procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procGetConsoleOutputCP = modkernel32.NewProc("GetConsoleOutputCP") procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") @@ -346,8 +349,10 @@ var ( procSetCommMask = modkernel32.NewProc("SetCommMask") procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") + procSetConsoleCP = modkernel32.NewProc("SetConsoleCP") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procSetConsoleOutputCP = modkernel32.NewProc("SetConsoleOutputCP") procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") @@ -477,12 +482,16 @@ var ( procGetDesktopWindow = moduser32.NewProc("GetDesktopWindow") procGetForegroundWindow = moduser32.NewProc("GetForegroundWindow") procGetGUIThreadInfo = moduser32.NewProc("GetGUIThreadInfo") + procGetKeyboardLayout = moduser32.NewProc("GetKeyboardLayout") procGetShellWindow = moduser32.NewProc("GetShellWindow") procGetWindowThreadProcessId = moduser32.NewProc("GetWindowThreadProcessId") procIsWindow = moduser32.NewProc("IsWindow") procIsWindowUnicode = moduser32.NewProc("IsWindowUnicode") procIsWindowVisible = moduser32.NewProc("IsWindowVisible") + procLoadKeyboardLayoutW = moduser32.NewProc("LoadKeyboardLayoutW") procMessageBoxW = moduser32.NewProc("MessageBoxW") + procToUnicodeEx = moduser32.NewProc("ToUnicodeEx") + procUnloadKeyboardLayout = moduser32.NewProc("UnloadKeyboardLayout") procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") @@ -788,6 +797,14 @@ func FreeSid(sid *SID) (err error) { return } +func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) { + r1, _, e1 := 
syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetLengthSid(sid *SID) (len uint32) { r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) len = uint32(r0) @@ -2149,6 +2166,15 @@ func GetComputerName(buf *uint16, n *uint32) (err error) { return } +func GetConsoleCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleMode(console Handle, mode *uint32) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) if r1 == 0 { @@ -2157,6 +2183,15 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) { return } +func GetConsoleOutputCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) if r1 == 0 { @@ -3025,6 +3060,14 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { return } +func SetConsoleCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func setConsoleCursorPosition(console Handle, position uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) if r1 == 0 { @@ -3041,6 +3084,14 @@ func SetConsoleMode(console Handle, mode uint32) (err error) { return } +func SetConsoleOutputCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCurrentDirectory(path *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) if r1 == 0 { @@ -4073,6 +4124,12 @@ func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) { return } +func GetKeyboardLayout(tid uint32) (hkl Handle) { + r0, _, _ := syscall.Syscall(procGetKeyboardLayout.Addr(), 1, uintptr(tid), 0, 0) + hkl = Handle(r0) + return +} + func GetShellWindow() (shellWindow HWND) { r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0) shellWindow = HWND(r0) @@ -4106,6 +4163,15 @@ func IsWindowVisible(hwnd HWND) (isVisible bool) { return } +func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadKeyboardLayoutW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(flags), 0) + hkl = Handle(r0) + if hkl == 0 { + err = errnoErr(e1) + } + return +} + func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) ret = int32(r0) @@ -4115,6 +4181,20 @@ func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret i return } +func ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) { + r0, _, _ := 
diff --git a/vendor/golang.org/x/term/LICENSE b/vendor/golang.org/x/term/LICENSE
index 6a66aea5e..2a7cf70da 100644
--- a/vendor/golang.org/x/term/LICENSE
+++ b/vendor/golang.org/x/term/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
 copyright notice, this list of conditions and the following disclaimer
 in the documentation and/or other materials provided with the
 distribution.
-   * Neither the name of Google Inc. nor the names of its
+   * Neither the name of Google LLC nor the names of its
 contributors may be used to endorse or promote products derived from
 this software without specific prior written permission.
diff --git a/vendor/golang.org/x/term/term_windows.go b/vendor/golang.org/x/term/term_windows.go
index 465f56060..df6bf948e 100644
--- a/vendor/golang.org/x/term/term_windows.go
+++ b/vendor/golang.org/x/term/term_windows.go
@@ -26,6 +26,7 @@ func makeRaw(fd int) (*State, error) {
 		return nil, err
 	}
 	raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT)
+	raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT
 	if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil {
 		return nil, err
 	}
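The one-line term change above makes makeRaw opt the console into virtual-terminal input (ENABLE_VIRTUAL_TERMINAL_INPUT), so special keys reach the application as ANSI escape sequences rather than console input records. A small sketch of the observable effect, assuming an interactive Windows console on stdin:

//go:build windows

package main

import (
	"fmt"
	"os"

	"golang.org/x/term"
)

func main() {
	fd := int(os.Stdin.Fd())
	// With this patch, raw mode also enables VT input, so e.g. the
	// up-arrow key arrives as the bytes ESC [ A ("\x1b[A").
	oldState, err := term.MakeRaw(fd)
	if err != nil {
		panic(err)
	}
	defer term.Restore(fd, oldState)

	buf := make([]byte, 16)
	n, err := os.Stdin.Read(buf)
	if err != nil {
		panic(err)
	}
	// Raw mode disables output processing too, hence the explicit \r\n.
	fmt.Printf("read %d byte(s): %q\r\n", n, buf[:n])
}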
diff --git a/vendor/golang.org/x/text/LICENSE b/vendor/golang.org/x/text/LICENSE
index 6a66aea5e..2a7cf70da 100644
--- a/vendor/golang.org/x/text/LICENSE
+++ b/vendor/golang.org/x/text/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
 copyright notice, this list of conditions and the following disclaimer
 in the documentation and/or other materials provided with the
 distribution.
-   * Neither the name of Google Inc. nor the names of its
+   * Neither the name of Google LLC nor the names of its
 contributors may be used to endorse or promote products derived from
 this software without specific prior written permission.
diff --git a/vendor/modules.txt b/vendor/modules.txt
index b6f4295fd..190913dce 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -307,8 +307,8 @@ github.com/onsi/gomega/matchers/support/goraph/edge
 github.com/onsi/gomega/matchers/support/goraph/node
 github.com/onsi/gomega/matchers/support/goraph/util
 github.com/onsi/gomega/types
-# github.com/openshift/api v0.0.0-20240422085825-2624175e9673
-## explicit; go 1.21
+# github.com/openshift/api v0.0.0-20240524162738-d899f8877d22
+## explicit; go 1.22.0
 github.com/openshift/api/config/v1
 github.com/openshift/api/config/v1alpha1
 github.com/openshift/api/machineconfiguration/v1
@@ -328,6 +328,11 @@ github.com/openshift/cluster-node-tuning-operator/pkg/performanceprofile/control
 # github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87
 ## explicit; go 1.12
 github.com/openshift/custom-resource-status/conditions/v1
+# github.com/openshift/hypershift/api v0.0.0-20241115183703-d41904871380
+## explicit; go 1.22.0
+github.com/openshift/hypershift/api/hypershift/v1beta1
+github.com/openshift/hypershift/api/ibmcapi
+github.com/openshift/hypershift/api/util/ipnet
 # github.com/openshift/machine-config-operator v0.0.1-0.20230724174830-7b54f1dcce4e
 ## explicit; go 1.18
 github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1
@@ -389,7 +394,7 @@ golang.org/x/exp/maps
 golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/module
 golang.org/x/mod/semver
-# golang.org/x/net v0.26.0
+# golang.org/x/net v0.30.0
 ## explicit; go 1.18
 golang.org/x/net/context
 golang.org/x/net/html
@@ -408,18 +413,18 @@ golang.org/x/net/websocket
 ## explicit; go 1.18
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
-# golang.org/x/sync v0.7.0
+# golang.org/x/sync v0.8.0
 ## explicit; go 1.18
 golang.org/x/sync/errgroup
-# golang.org/x/sys v0.21.0
+# golang.org/x/sys v0.26.0
 ## explicit; go 1.18
 golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
-# golang.org/x/term v0.21.0
+# golang.org/x/term v0.25.0
 ## explicit; go 1.18
 golang.org/x/term
-# golang.org/x/text v0.16.0
+# golang.org/x/text v0.19.0
 ## explicit; go 1.18
 golang.org/x/text/cases
 golang.org/x/text/encoding
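Taken together, the user32 wrappers vendored above (GetKeyboardLayout, ToUnicodeEx) can translate a virtual-key press into the text it would produce under the active layout. A hypothetical sketch; the virtual-key code 0x41 and scan code 0x1E for the A key are assumptions that hold for a standard US layout:

//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// Keyboard layout of the calling thread (thread ID 0 means "current").
	hkl := windows.GetKeyboardLayout(0)

	// 256-entry key state with every key up, so no Shift/Ctrl modifiers.
	var keystate [256]byte
	var buf [8]uint16
	// Assumption: 0x41 is the virtual-key code and 0x1E the scan code
	// for the A key on a standard US layout.
	n := windows.ToUnicodeEx(0x41, 0x1E, &keystate[0], &buf[0], int32(len(buf)), 0, hkl)
	if n > 0 {
		fmt.Printf("VK 0x41 produces %q\n", windows.UTF16ToString(buf[:n]))
	}
}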