From 6a8020e91d351e592757529a1d33a011ab9bca93 Mon Sep 17 00:00:00 2001
From: kartik-579 <84493919+kartik-579@users.noreply.github.com>
Date: Sat, 9 Jul 2022 09:41:08 +0530
Subject: [PATCH] feat: Kube capacity support (#1601)

* wip
* wip
* wip
* updated argo cd version
* updated k8s/application.go as per k8s library
* updated imports for argo-workflows and argo-cd lib
* cleared sessionManager implementations
* handled changes in gitlab lib update
* handled lib changes in k8sUtil.go
* updated AppListingService.go and AppService.go
* added argoproj/gitops-engine lib
* updated CILogService.go & WorkflowDagExecutor.go
* updated InstalledAppService.go & AppListingRestHandler.go
* updated go version
* fixed go version
* updated argocd image
* Added new argocd manifests
* updated authenticator import
* script-changes
* fixed loginService binding
* updated authenticator
* updated installation script for initial argocd pwd
* updated log for debugging
* updated Dockerfile
* updated Dockerfile
* updated Dockerfile
* removed googleapis/gnostic
* resynced googleapis/gnostic
* updated argo-cd/assets
* updated vendor files and k8sCapacityService.go
* updated deprecated apis - argo cd repository
* updated spec
* updated spec
* wip
* updated spec
* updated k8sApplicationService.go
* changes in spec
* changes in spec
* wip - completed node detail api
* wip - update manifest
* updated bean
* updated orchestrator image
* updated labels data in node listing api
* updated err handling for cluster detail
* updated logs for debugging
* wip
* fix bug - resource list addition panic
* updated response for cluster listing api
* updated group for manifest fetch/update request
* hardcoded cluster list for testing
* added log
* updated node errors field in cluster listing api resp
* updated node gvk for manifest fetch request
* updated logs for debugging
* updated missing field data - part 1
* updated missing field data - part 2
* wip
* fixed taint and nodeError issue
* updated vendor/argoproj/argo-cd/assets
* updated condition filtering in node detail
* update node error condition for testing
* updated devtron image
* updated cron service for getting cluster connection status
* updated cluster name in connected cluster resp
* updated cluster cron service status error
* updated node error for testing
* updated resource value string
* fixed limits and requests logic
* wip - updated allocatable field in node detail api
* updated resource values
* updated server version
* updated resource string
* removed unneeded network policy
* updated resp for sorting at FE
* updated orchestrator image
* updated rbac
* updated argocd lib version to v2.4.0
* updated argo cd image in manifest
* update manifest
* fixed bug for cluster detail page and rbac
* updated devtron image
* updated logs for debugging
* updated fix for non-default cluster unauthorized issue
* added TODO
* updated handling for clusters without metrics server
* wip
* changes for unavailable resource usage statistics and error handling
* added argoUserService
* wip
* fixed wiring issues, updated argo api calls with new token
* wip - token creation workaround
* wip
* fixed ea build issue
* fixed token no extraction panic
* updated devtron image
* updated script no
* updated app detail health status
* updated devtron image
* updated devtron image
* review change
* review change
* updated devtron image
* updated capability
* updated image
* updated code for cascade delete argo pipeline
* updated authenticator import
* merged argo-update
* updated script no
* restored
argocd assets Co-authored-by: pawan-59 Co-authored-by: nishant --- 688 | 0 api/restHandler/common/apiError.go | 5 + api/router/router.go | 7 +- cmd/external-app/router.go | 6 + cmd/external-app/wire_gen.go | 9 +- go.mod | 3 +- go.sum | 1 + pkg/cluster/ClusterService.go | 4 + pkg/cluster/repository/ClusterRepository.go | 10 + scripts/sql/58_alter_cluster.down.sql | 2 + scripts/sql/58_alter_cluster.up.sql | 2 + specs/kube-capacity.yaml | 374 ++++ util/k8s/ClusterCronService.go | 114 ++ util/k8s/bean.go | 93 + util/k8s/k8sApplicationService.go | 33 +- util/k8s/k8sCapacityRestHandler.go | 262 +++ util/k8s/k8sCapacityRouter.go | 36 + util/k8s/k8sCapacityService.go | 667 +++++++ util/k8s/wire_k8sApp.go | 9 +- vendor/k8s.io/metrics/LICENSE | 201 ++ vendor/k8s.io/metrics/pkg/apis/metrics/doc.go | 21 + .../metrics/pkg/apis/metrics/register.go | 55 + .../k8s.io/metrics/pkg/apis/metrics/types.go | 101 + .../metrics/pkg/apis/metrics/v1alpha1/doc.go | 24 + .../pkg/apis/metrics/v1alpha1/generated.pb.go | 1758 +++++++++++++++++ .../pkg/apis/metrics/v1alpha1/generated.proto | 95 + .../pkg/apis/metrics/v1alpha1/register.go | 53 + .../pkg/apis/metrics/v1alpha1/types.go | 101 + .../v1alpha1/zz_generated.conversion.go | 209 ++ .../metrics/v1alpha1/zz_generated.deepcopy.go | 186 ++ .../metrics/pkg/apis/metrics/v1beta1/doc.go | 24 + .../pkg/apis/metrics/v1beta1/generated.pb.go | 1758 +++++++++++++++++ .../pkg/apis/metrics/v1beta1/generated.proto | 95 + .../pkg/apis/metrics/v1beta1/register.go | 53 + .../metrics/pkg/apis/metrics/v1beta1/types.go | 101 + .../v1beta1/zz_generated.conversion.go | 209 ++ .../metrics/v1beta1/zz_generated.deepcopy.go | 186 ++ .../pkg/apis/metrics/zz_generated.deepcopy.go | 186 ++ .../client/clientset/versioned/clientset.go | 130 ++ .../pkg/client/clientset/versioned/doc.go | 20 + .../client/clientset/versioned/scheme/doc.go | 20 + .../clientset/versioned/scheme/register.go | 58 + .../versioned/typed/metrics/v1alpha1/doc.go | 20 + .../metrics/v1alpha1/generated_expansion.go | 23 + .../typed/metrics/v1alpha1/metrics_client.go | 112 ++ .../typed/metrics/v1alpha1/nodemetrics.go | 98 + .../typed/metrics/v1alpha1/podmetrics.go | 103 + .../versioned/typed/metrics/v1beta1/doc.go | 20 + .../metrics/v1beta1/generated_expansion.go | 23 + .../typed/metrics/v1beta1/metrics_client.go | 112 ++ .../typed/metrics/v1beta1/nodemetrics.go | 98 + .../typed/metrics/v1beta1/podmetrics.go | 103 + vendor/modules.txt | 9 + wire_gen.go | 9 +- 54 files changed, 7999 insertions(+), 12 deletions(-) create mode 100644 688 create mode 100644 scripts/sql/58_alter_cluster.down.sql create mode 100644 scripts/sql/58_alter_cluster.up.sql create mode 100644 specs/kube-capacity.yaml create mode 100644 util/k8s/ClusterCronService.go create mode 100644 util/k8s/bean.go create mode 100644 util/k8s/k8sCapacityRestHandler.go create mode 100644 util/k8s/k8sCapacityRouter.go create mode 100644 util/k8s/k8sCapacityService.go create mode 100644 vendor/k8s.io/metrics/LICENSE create mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/doc.go create mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/register.go create mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/types.go create mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/doc.go create mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/register.go create mode 100644 
vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/types.go create mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/doc.go create mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto create mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/register.go create mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/types.go create mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/clientset.go create mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/doc.go create mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/scheme/doc.go create mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/scheme/register.go create mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/doc.go create mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/metrics_client.go create mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/nodemetrics.go create mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/podmetrics.go create mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/doc.go create mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/metrics_client.go create mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/nodemetrics.go create mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/podmetrics.go diff --git a/688 b/688 new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/restHandler/common/apiError.go b/api/restHandler/common/apiError.go index a8e9452804..df772e4060 100644 --- a/api/restHandler/common/apiError.go +++ b/api/restHandler/common/apiError.go @@ -78,6 +78,11 @@ func WriteJsonResp(w http.ResponseWriter, err error, respBody interface{}, statu apiErr.InternalMessage = errStatus.Error() apiErr.UserMessage = errStatus.Error() response.Errors = []*util.ApiError{apiErr} + } else if unexpectedObjectError, ok := err.(*errors2.UnexpectedObjectError); ok { + apiErr := &util.ApiError{} + apiErr.InternalMessage = unexpectedObjectError.Error() + apiErr.UserMessage = unexpectedObjectError.Error() + response.Errors = []*util.ApiError{apiErr} } else { apiErr := &util.ApiError{} apiErr.Code = "000" // 000=unknown diff --git a/api/router/router.go b/api/router/router.go index b7428b8565..958eff19cf 100644 --- a/api/router/router.go +++ b/api/router/router.go @@ -110,6 +110,7 @@ type MuxRouter struct { serverRouter server.ServerRouter apiTokenRouter apiToken.ApiTokenRouter helmApplicationStatusUpdateHandler cron.HelmApplicationStatusUpdateHandler + k8sCapacityRouter 
k8s.K8sCapacityRouter } func NewMuxRouter(logger *zap.SugaredLogger, HelmRouter HelmRouter, PipelineConfigRouter PipelineConfigRouter, @@ -135,7 +136,7 @@ func NewMuxRouter(logger *zap.SugaredLogger, HelmRouter HelmRouter, PipelineConf commonDeploymentRouter appStoreDeployment.CommonDeploymentRouter, externalLinkRouter externalLink.ExternalLinkRouter, globalPluginRouter GlobalPluginRouter, selfRegistrationRolesRouter user.SelfRegistrationRolesRouter, moduleRouter module.ModuleRouter, serverRouter server.ServerRouter, apiTokenRouter apiToken.ApiTokenRouter, - helmApplicationStatusUpdateHandler cron.HelmApplicationStatusUpdateHandler) *MuxRouter { + helmApplicationStatusUpdateHandler cron.HelmApplicationStatusUpdateHandler, k8sCapacityRouter k8s.K8sCapacityRouter) *MuxRouter { r := &MuxRouter{ Router: mux.NewRouter(), HelmRouter: HelmRouter, @@ -198,6 +199,7 @@ func NewMuxRouter(logger *zap.SugaredLogger, HelmRouter HelmRouter, PipelineConf serverRouter: serverRouter, apiTokenRouter: apiTokenRouter, helmApplicationStatusUpdateHandler: helmApplicationStatusUpdateHandler, + k8sCapacityRouter: k8sCapacityRouter, } return r } @@ -388,4 +390,7 @@ func (r MuxRouter) Init() { // api-token router apiTokenRouter := r.Router.PathPrefix("/orchestrator/api-token").Subrouter() r.apiTokenRouter.InitApiTokenRouter(apiTokenRouter) + + k8sCapacityApp := r.Router.PathPrefix("/orchestrator/k8s/capacity").Subrouter() + r.k8sCapacityRouter.InitK8sCapacityRouter(k8sCapacityApp) } diff --git a/cmd/external-app/router.go b/cmd/external-app/router.go index b2709044a0..6c894b54c5 100644 --- a/cmd/external-app/router.go +++ b/cmd/external-app/router.go @@ -47,6 +47,7 @@ type MuxRouter struct { moduleRouter module.ModuleRouter serverRouter server.ServerRouter apiTokenRouter apiToken.ApiTokenRouter + k8sCapacityRouter k8s.K8sCapacityRouter } func NewMuxRouter( @@ -69,6 +70,7 @@ func NewMuxRouter( externalLinkRouter externalLink.ExternalLinkRouter, moduleRouter module.ModuleRouter, serverRouter server.ServerRouter, apiTokenRouter apiToken.ApiTokenRouter, + k8sCapacityRouter k8s.K8sCapacityRouter, ) *MuxRouter { r := &MuxRouter{ Router: mux.NewRouter(), @@ -92,6 +94,7 @@ func NewMuxRouter( moduleRouter: moduleRouter, serverRouter: serverRouter, apiTokenRouter: apiTokenRouter, + k8sCapacityRouter: k8sCapacityRouter, } return r } @@ -153,6 +156,9 @@ func (r *MuxRouter) Init() { k8sApp := r.Router.PathPrefix("/orchestrator/k8s").Subrouter() r.k8sApplicationRouter.InitK8sApplicationRouter(k8sApp) + k8sCapacityApp := r.Router.PathPrefix("/orchestrator/k8s/capacity").Subrouter() + r.k8sCapacityRouter.InitK8sCapacityRouter(k8sCapacityApp) + // chart-repo router starts chartRepoRouter := r.Router.PathPrefix("/orchestrator/chart-repo").Subrouter() r.chartRepositoryRouter.Init(chartRepoRouter) diff --git a/cmd/external-app/wire_gen.go b/cmd/external-app/wire_gen.go index b49508c122..e756a8fcc5 100644 --- a/cmd/external-app/wire_gen.go +++ b/cmd/external-app/wire_gen.go @@ -257,7 +257,14 @@ func InitializeApp() (*App, error) { apiTokenServiceImpl := apiToken.NewApiTokenServiceImpl(sugaredLogger, apiTokenSecretServiceImpl, userServiceImpl, userAuditServiceImpl, apiTokenRepositoryImpl) apiTokenRestHandlerImpl := apiToken2.NewApiTokenRestHandlerImpl(sugaredLogger, apiTokenServiceImpl, userServiceImpl, enforcerImpl, validate) apiTokenRouterImpl := apiToken2.NewApiTokenRouterImpl(apiTokenRestHandlerImpl) - muxRouter := NewMuxRouter(sugaredLogger, ssoLoginRouterImpl, teamRouterImpl, userAuthRouterImpl, userRouterImpl, clusterRouterImpl, 
dashboardRouterImpl, helmAppRouterImpl, environmentRouterImpl, k8sApplicationRouterImpl, chartRepositoryRouterImpl, appStoreDiscoverRouterImpl, appStoreValuesRouterImpl, appStoreDeploymentRouterImpl, dashboardTelemetryRouterImpl, commonDeploymentRouterImpl, externalLinkRouterImpl, moduleRouterImpl, serverRouterImpl, apiTokenRouterImpl) + clusterCronServiceImpl, err := k8s.NewClusterCronServiceImpl(sugaredLogger, clusterServiceImpl, k8sApplicationServiceImpl, clusterRepositoryImpl) + if err != nil { + return nil, err + } + k8sCapacityServiceImpl := k8s.NewK8sCapacityServiceImpl(sugaredLogger, clusterServiceImpl, k8sApplicationServiceImpl, k8sClientServiceImpl, clusterCronServiceImpl) + k8sCapacityRestHandlerImpl := k8s.NewK8sCapacityRestHandlerImpl(sugaredLogger, k8sCapacityServiceImpl, userServiceImpl, enforcerImpl, clusterServiceImpl, environmentServiceImpl) + k8sCapacityRouterImpl := k8s.NewK8sCapacityRouterImpl(k8sCapacityRestHandlerImpl) + muxRouter := NewMuxRouter(sugaredLogger, ssoLoginRouterImpl, teamRouterImpl, userAuthRouterImpl, userRouterImpl, clusterRouterImpl, dashboardRouterImpl, helmAppRouterImpl, environmentRouterImpl, k8sApplicationRouterImpl, chartRepositoryRouterImpl, appStoreDiscoverRouterImpl, appStoreValuesRouterImpl, appStoreDeploymentRouterImpl, dashboardTelemetryRouterImpl, commonDeploymentRouterImpl, externalLinkRouterImpl, moduleRouterImpl, serverRouterImpl, apiTokenRouterImpl, k8sCapacityRouterImpl) mainApp := NewApp(db, sessionManager, muxRouter, telemetryEventClientImpl, sugaredLogger) return mainApp, nil } diff --git a/go.mod b/go.mod index 978411752f..4231e8e7c0 100644 --- a/go.mod +++ b/go.mod @@ -63,6 +63,8 @@ require ( k8s.io/apimachinery v0.23.3 k8s.io/client-go v11.0.1-0.20190820062731-7e43eff7c80a+incompatible k8s.io/helm v2.12.3+incompatible + k8s.io/kubectl v0.23.1 + k8s.io/metrics v0.23.1 k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 sigs.k8s.io/yaml v1.3.0 ) @@ -234,7 +236,6 @@ require ( k8s.io/klog/v2 v2.40.1 // indirect k8s.io/kube-aggregator v0.23.1 // indirect k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf // indirect - k8s.io/kubectl v0.23.1 // indirect k8s.io/kubernetes v1.23.1 // indirect mellium.im/sasl v0.2.1 // indirect sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect diff --git a/go.sum b/go.sum index c4c410a19e..df3d8a9664 100644 --- a/go.sum +++ b/go.sum @@ -1929,6 +1929,7 @@ k8s.io/kubelet v0.23.1/go.mod h1:WdvMiehtNPhtiW8sSVVvr8YYU00L0u+0HkfMDEB0LKM= k8s.io/kubernetes v1.23.1 h1:iJfubd03CDap4m69Ue+u2I6quNUYiYlC8+TakEHATjc= k8s.io/kubernetes v1.23.1/go.mod h1:baMGbPpwwP0kT/+eAPtdqoWNRoXyyTJ2Zf+fw/Y8t04= k8s.io/legacy-cloud-providers v0.23.1/go.mod h1:HIt+r/ReEfjS6IGaGfpZ7tCna7hbMBXMOaIp/SWABVE= +k8s.io/metrics v0.23.1 h1:ZKrRdjarB/JPl4nPef7SlMjAUUVzU5XdSKgT+cm6bFA= k8s.io/metrics v0.23.1/go.mod h1:qXvsM1KANrc+ZZeFwj6Phvf0NLiC+d3RwcsLcdGc+xs= k8s.io/mount-utils v0.23.1/go.mod h1:9pFhzVjxle1osJUo++9MFDat9HPkQUOoHCn+eExZ3Ew= k8s.io/pod-security-admission v0.23.1/go.mod h1:WDb/vFWf7jKSGe2e07LTEjDZ0MHMDhUIzXNvQ45HytU= diff --git a/pkg/cluster/ClusterService.go b/pkg/cluster/ClusterService.go index 20124b7c1c..83b5e2856d 100644 --- a/pkg/cluster/ClusterService.go +++ b/pkg/cluster/ClusterService.go @@ -46,6 +46,7 @@ type ClusterBean struct { AgentInstallationStage int `json:"agentInstallationStage,notnull"` // -1=external, 0=not triggered, 1=progressing, 2=success, 3=fails K8sVersion string `json:"k8sVersion"` HasConfigOrUrlChanged bool `json:"-"` + ErrorInConnecting string `json:"-"` } type PrometheusAuth struct { 
@@ -240,6 +241,8 @@ func (impl *ClusterServiceImpl) FindAll() ([]*ClusterBean, error) { ServerUrl: m.ServerUrl, Active: m.Active, K8sVersion: m.K8sVersion, + ErrorInConnecting: m.ErrorInConnecting, + Config: m.Config, }) } return beans, nil @@ -261,6 +264,7 @@ func (impl *ClusterServiceImpl) FindAllActive() ([]ClusterBean, error) { AgentInstallationStage: m.AgentInstallationStage, Config: m.Config, K8sVersion: m.K8sVersion, + ErrorInConnecting: m.ErrorInConnecting, }) } return beans, nil diff --git a/pkg/cluster/repository/ClusterRepository.go b/pkg/cluster/repository/ClusterRepository.go index 11f302eab4..744ebdb7f1 100644 --- a/pkg/cluster/repository/ClusterRepository.go +++ b/pkg/cluster/repository/ClusterRepository.go @@ -38,6 +38,7 @@ type Cluster struct { PTlsClientKey string `sql:"p_tls_client_key"` AgentInstallationStage int `sql:"agent_installation_stage"` K8sVersion string `sql:"k8s_version"` + ErrorInConnecting string `sql:"error_in_connecting"` sql.AuditLog } @@ -53,6 +54,7 @@ type ClusterRepository interface { Update(model *Cluster) error Delete(model *Cluster) error MarkClusterDeleted(model *Cluster) error + UpdateClusterConnectionStatus(clusterId int, errorInConnecting string) error } func NewClusterRepositoryImpl(dbConnection *pg.DB, logger *zap.SugaredLogger) *ClusterRepositoryImpl { @@ -144,3 +146,11 @@ func (impl ClusterRepositoryImpl) MarkClusterDeleted(model *Cluster) error { model.Active = false return impl.dbConnection.Update(model) } + +func (impl ClusterRepositoryImpl) UpdateClusterConnectionStatus(clusterId int, errorInConnecting string) error { + cluster := &Cluster{} + _, err := impl.dbConnection.Model(cluster). + Set("error_in_connecting = ?", errorInConnecting).Where("id = ?", clusterId). + Update() + return err +} diff --git a/scripts/sql/58_alter_cluster.down.sql b/scripts/sql/58_alter_cluster.down.sql new file mode 100644 index 0000000000..1e97572971 --- /dev/null +++ b/scripts/sql/58_alter_cluster.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE cluster + DROP COLUMN error_in_connecting; \ No newline at end of file diff --git a/scripts/sql/58_alter_cluster.up.sql b/scripts/sql/58_alter_cluster.up.sql new file mode 100644 index 0000000000..3cbe966447 --- /dev/null +++ b/scripts/sql/58_alter_cluster.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE cluster + ADD COLUMN error_in_connecting TEXT; diff --git a/specs/kube-capacity.yaml b/specs/kube-capacity.yaml new file mode 100644 index 0000000000..ba7ed4baa2 --- /dev/null +++ b/specs/kube-capacity.yaml @@ -0,0 +1,374 @@ +openapi: "3.0.0" +info: + title: Kube capacity + version: "1.0" +paths: + /orchestrator/k8s/capacity/cluster/list: + get: + description: get list of clusters + operationId: GetClusterList + responses: + '200': + description: Successfully return list of cluster + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ClusterCapacityDto' + '400': + description: Bad Request. Input Validation error/wrong request body. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/Error' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + '403': + description: Unauthorized User + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + /orchestrator/k8s/capacity/cluster/{clusterId}: + get: + description: get cluster detail + operationId: GetClusterDetail + parameters: + - name: clusterId + in: path + required: true + schema: + type: integer + responses: + '200': + description: Successfully return detail of cluster + content: + application/json: + schema: + $ref: '#/components/schemas/ClusterCapacityDetailDto' + '400': + description: Bad Request. Input Validation error/wrong request body. + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + '403': + description: Unauthorized User + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + /orchestrator/k8s/capacity/node/list: + get: + description: get node list + operationId: GetNodeList + parameters: + - name: clusterId + in: query + required: true + schema: + type: integer + responses: + '200': + description: Successfully return list of node + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/NodeCapacityDto' + '400': + description: Bad Request. Input Validation error/wrong request body. + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + '403': + description: Unauthorized User + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + /orchestrator/k8s/capacity/node: + get: + description: get node detail + operationId: GetNodeDetail + parameters: + - name: clusterId + in: query + required: true + schema: + type: integer + - name: name + in: query + required: true + schema: + type: string + description: name of node + responses: + '200': + description: Successfully return node detail + content: + application/json: + schema: + $ref: '#/components/schemas/NodeCapacityDetailDto' + '400': + description: Bad Request. Input Validation error/wrong request body. + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + '403': + description: Unauthorized User + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + put: + description: update node manifest + operationId: UpdateNodeManifest + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/NodeManifestUpdateDto' + responses: + '200': + description: Successfully return updated node manifest + content: + application/json: + schema: + $ref: '#/components/schemas/NodeManifestUpdateResponse' + '400': + description: Bad Request. Input Validation error/wrong request body. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/Error' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + '403': + description: Unauthorized User + content: + application/json: + schema: + $ref: '#/components/schemas/Error' +components: + schemas: + ClusterCapacityDto: + type: object + properties: + id: + type: integer + name: + type: string + nodeCount: + type: integer + nodeErrors: + type: array + items: + type: string + nodeK8sVersions: + type: array + items: + type: string + errorInNodeListing: + type: boolean + cpu: + $ref: '#/components/schemas/ResourceDetailObject' + memory: + $ref: '#/components/schemas/ResourceDetailObject' + ClusterCapacityDetailDto: + type: object + properties: + cpu: + $ref: '#/components/schemas/ResourceDetailObject' + memory: + $ref: '#/components/schemas/ResourceDetailObject' + NodeCapacityDto: + type: object + properties: + name: + type: string + status: + type: string + roles: + type: array + items: + type: string + errors: + type: array + items: + $ref: '#/components/schemas/NodeError' + k8sVersion: + type: string + podCount: + type: integer + taintCount: + type: integer + cpu: + $ref: '#/components/schemas/ResourceDetailObject' + memory: + $ref: '#/components/schemas/ResourceDetailObject' + age: + type: string + labels: + type: array + items: + $ref: '#/components/schemas/LabelTaintObject' + NodeCapacityDetailObject: + type: object + properties: + name: + type: string + roles: + type: array + items: + type: string + k8sVersion: + type: string + unschedulable: + type: boolean + createdAt: + type: string + internalIp: + type: string + externalIp: + type: string + resources: + type: array + items: + $ref: '#/components/schemas/ResourceDetailObject' + labels: + type: array + items: + $ref: '#/components/schemas/LabelTaintObject' + annotations: + type: array + items: + $ref: '#/components/schemas/LabelTaintObject' + taints: + type: array + items: + $ref: '#/components/schemas/LabelTaintObject' + conditions: + type: array + items: + $ref: '#/components/schemas/NodeConditionObject' + errors: + type: array + items: + $ref: '#/components/schemas/NodeError' + pods: + type: array + items: + $ref: '#/components/schemas/PodCapacityDto' + manifest: + type: string + version: + type: string + kind: + type: string + NodeError: + type: object + description: map of conditionType(key) and error(value) + PodCapacityDto: + type: object + properties: + name: + type: string + namespace: + type: string + cpu: + $ref: '#/components/schemas/ResourceDetailObject' + memory: + $ref: '#/components/schemas/ResourceDetailObject' + age: + type: string + NodeManifestUpdateDto: + type: object + properties: + clusterId: + type: integer + name: + type: string + manifestPatch: + type: string + version: + type: string + kind: + type: string + NodeManifestUpdateResponse: + type: object + properties: + manifest: + type: string + ResourceDetailObject: + type: object + properties: + name: + type: string + capacity: + type: string + allocatable: + type: string + usage: + type: string + request: + type: string + limit: + type: string + usagePercentage: + type: string + requestPercentage: + type: string + limitPercentage: + type: string + LabelTaintObject: + type: object + properties: + key: + type: string + value: + type: string + effect: + type: string + NodeConditionObject: + type: object + properties: + type: + type: string + haveIssue: + type: boolean + reason: + type: string + message: + type: 
string diff --git a/util/k8s/ClusterCronService.go b/util/k8s/ClusterCronService.go new file mode 100644 index 0000000000..f95d05ea1a --- /dev/null +++ b/util/k8s/ClusterCronService.go @@ -0,0 +1,114 @@ +package k8s + +import ( + "context" + "fmt" + "github.com/devtron-labs/devtron/pkg/cluster" + clusterRepository "github.com/devtron-labs/devtron/pkg/cluster/repository" + "github.com/robfig/cron/v3" + "go.uber.org/zap" + "k8s.io/client-go/kubernetes" + "log" + "sync" +) + +type ClusterCronService interface { +} + +type ClusterCronServiceImpl struct { + logger *zap.SugaredLogger + clusterService cluster.ClusterService + k8sApplicationService K8sApplicationService + clusterRepository clusterRepository.ClusterRepository +} + +func NewClusterCronServiceImpl(logger *zap.SugaredLogger, clusterService cluster.ClusterService, + k8sApplicationService K8sApplicationService, clusterRepository clusterRepository.ClusterRepository) (*ClusterCronServiceImpl, error) { + clusterCronServiceImpl := &ClusterCronServiceImpl{ + logger: logger, + clusterService: clusterService, + k8sApplicationService: k8sApplicationService, + clusterRepository: clusterRepository, + } + // initialise cron + newCron := cron.New(cron.WithChain()) + newCron.Start() + + // add function into cron + //TODO: get cron time from env var + _, err := newCron.AddFunc(fmt.Sprint("@every 15m"), clusterCronServiceImpl.GetAndUpdateClusterConnectionStatus) + if err != nil { + fmt.Println("error in adding cron function into cluster cron service") + return clusterCronServiceImpl, err + } + return clusterCronServiceImpl, nil +} + +func (impl *ClusterCronServiceImpl) GetAndUpdateClusterConnectionStatus() { + impl.logger.Debug("starting cluster connection status fetch thread") + defer impl.logger.Debug("stopped cluster connection status fetch thread") + + //getting all clusters + clusters, err := impl.clusterService.FindAll() + if err != nil { + impl.logger.Errorw("error in getting all clusters", "err", err) + return + } + wg := &sync.WaitGroup{} + wg.Add(len(clusters)) + mutex := &sync.Mutex{} + //map of clusterId and error in its connection check process + respMap := make(map[int]error) + for _, cluster := range clusters { + // getting restConfig and clientSet outside the goroutine because we don't want to call goroutine func with receiver function + restConfig, err := impl.k8sApplicationService.GetRestConfigByCluster(cluster) + if err != nil { + impl.logger.Errorw("error in getting restConfig by cluster", "err", err, "clusterId", cluster.Id) + mutex.Lock() + respMap[cluster.Id] = err + mutex.Unlock() + continue + } + k8sClientSet, err := kubernetes.NewForConfig(restConfig) + if err != nil { + impl.logger.Errorw("error in getting client set by rest config", "err", err, "restConfig", restConfig) + mutex.Lock() + respMap[cluster.Id] = err + mutex.Unlock() + continue + } + go GetAndUpdateConnectionStatusForOneCluster(k8sClientSet, cluster.Id, respMap, wg, mutex) + } + wg.Wait() + impl.HandleErrorInClusterConnections(respMap) + return +} + +func GetAndUpdateConnectionStatusForOneCluster(k8sClientSet *kubernetes.Clientset, clusterId int, respMap map[int]error, wg *sync.WaitGroup, mutex *sync.Mutex) { + defer wg.Done() + //using livez path as healthz path is deprecated + path := "/livez" + response, err := k8sClientSet.Discovery().RESTClient().Get().AbsPath(path).DoRaw(context.Background()) + log.Println("received response for cluster livez status", "response", string(response), "err", err, "clusterId", clusterId) + if err == nil && string(response) != 
"ok" { + err = fmt.Errorf("ErrorNotOk : response != 'ok' : %s", string(response)) + } + mutex.Lock() + respMap[clusterId] = err + mutex.Unlock() + return +} + +func (impl *ClusterCronServiceImpl) HandleErrorInClusterConnections(respMap map[int]error) { + for clusterId, err := range respMap { + errorInConnecting := "" + if err != nil { + errorInConnecting = err.Error() + } + //updating cluster connection status + errInUpdating := impl.clusterRepository.UpdateClusterConnectionStatus(clusterId, errorInConnecting) + if errInUpdating != nil { + impl.logger.Errorw("error in updating cluster connection status", "err", err, "clusterId", clusterId, "errorInConnecting", errorInConnecting) + } + } +} diff --git a/util/k8s/bean.go b/util/k8s/bean.go new file mode 100644 index 0000000000..d4def4235a --- /dev/null +++ b/util/k8s/bean.go @@ -0,0 +1,93 @@ +package k8s + +import ( + metav1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +type ClusterCapacityDetail struct { + Id int `json:"id,omitempty"` + Name string `json:"name,omitempty"` + ErrorInConnection string `json:"errorInNodeListing,omitempty"` + NodeCount int `json:"nodeCount,omitempty"` + NodeErrors map[metav1.NodeConditionType][]string `json:"nodeErrors"` + NodeK8sVersions []string `json:"nodeK8sVersions"` + ServerVersion string `json:"serverVersion,omitempty"` + Cpu *ResourceDetailObject `json:"cpu"` + Memory *ResourceDetailObject `json:"memory"` +} + +type NodeCapacityDetail struct { + Name string `json:"name"` + Version string `json:"version,omitempty"` + Kind string `json:"kind,omitempty"` + Roles []string `json:"roles"` + K8sVersion string `json:"k8sVersion"` + Cpu *ResourceDetailObject `json:"cpu,omitempty"` + Memory *ResourceDetailObject `json:"memory,omitempty"` + Age string `json:"age,omitempty"` + Status string `json:"status,omitempty"` + PodCount int `json:"podCount,omitempty"` + TaintCount int `json:"taintCount,omitempty"` + Errors map[metav1.NodeConditionType]string `json:"errors"` + InternalIp string `json:"internalIp"` + ExternalIp string `json:"externalIp"` + Unschedulable bool `json:"unschedulable"` + CreatedAt string `json:"createdAt"` + Labels []*LabelAnnotationTaintObject `json:"labels,omitempty"` + Annotations []*LabelAnnotationTaintObject `json:"annotations,omitempty"` + Taints []*LabelAnnotationTaintObject `json:"taints,omitempty"` + Conditions []*NodeConditionObject `json:"conditions,omitempty"` + Resources []*ResourceDetailObject `json:"resources,omitempty"` + Pods []*PodCapacityDetail `json:"pods,omitempty"` + Manifest unstructured.Unstructured `json:"manifest,omitempty"` + ClusterName string `json:"clusterName,omitempty"` +} + +type PodCapacityDetail struct { + Name string `json:"name"` + Namespace string `json:"namespace"` + Cpu *ResourceDetailObject `json:"cpu"` + Memory *ResourceDetailObject `json:"memory"` + Age string `json:"age"` + CreatedAt string `json:"createdAt"` +} + +type ResourceDetailObject struct { + ResourceName string `json:"name,omitempty"` + Capacity string `json:"capacity,omitempty"` + Allocatable string `json:"allocatable,omitempty"` + Usage string `json:"usage,omitempty"` + Request string `json:"request,omitempty"` + Limit string `json:"limit,omitempty"` + UsagePercentage string `json:"usagePercentage,omitempty"` + RequestPercentage string `json:"requestPercentage,omitempty"` + LimitPercentage string `json:"limitPercentage,omitempty"` + //below fields to be used at FE for sorting + CapacityInBytes int64 `json:"capacityInBytes,omitempty"` + AllocatableInBytes int64 
`json:"allocatableInBytes,omitempty"` + UsageInBytes int64 `json:"usageInBytes,omitempty"` + RequestInBytes int64 `json:"requestInBytes,omitempty"` + LimitInBytes int64 `json:"limitInBytes,omitempty"` +} + +type LabelAnnotationTaintObject struct { + Key string `json:"key"` + Value string `json:"value"` + Effect string `json:"effect,omitempty"` +} + +type NodeConditionObject struct { + Type string `json:"type"` + HaveIssue bool `json:"haveIssue"` + Reason string `json:"reason"` + Message string `json:"message"` +} + +type NodeManifestUpdateDto struct { + ClusterId int `json:"clusterId"` + Name string `json:"name"` + ManifestPatch string `json:"manifestPatch"` + Version string `json:"version"` + Kind string `json:"kind"` +} diff --git a/util/k8s/k8sApplicationService.go b/util/k8s/k8sApplicationService.go index 76c96014a2..e31d8befd7 100644 --- a/util/k8s/k8sApplicationService.go +++ b/util/k8s/k8sApplicationService.go @@ -27,6 +27,8 @@ type K8sApplicationService interface { GetPodLogs(request *ResourceRequestBean) (io.ReadCloser, error) ValidateResourceRequest(appIdentifier *client.AppIdentifier, request *application.K8sRequestBean) (bool, error) GetResourceInfo() (*ResourceInfo, error) + GetRestConfigByClusterId(clusterId int) (*rest.Config, error) + GetRestConfigByCluster(cluster *cluster.ClusterBean) (*rest.Config, error) } type K8sApplicationServiceImpl struct { logger *zap.SugaredLogger @@ -65,7 +67,7 @@ type ResourceInfo struct { func (impl *K8sApplicationServiceImpl) GetResource(request *ResourceRequestBean) (*application.ManifestResponse, error) { //getting rest config by clusterId - restConfig, err := impl.getRestConfigByClusterId(request.AppIdentifier.ClusterId) + restConfig, err := impl.GetRestConfigByClusterId(request.AppIdentifier.ClusterId) if err != nil { impl.logger.Errorw("error in getting rest config by cluster Id", "err", err, "clusterId", request.AppIdentifier.ClusterId) return nil, err @@ -98,7 +100,7 @@ func (impl *K8sApplicationServiceImpl) CreateResource(request *ResourceRequestBe } //getting rest config by clusterId - restConfig, err := impl.getRestConfigByClusterId(request.AppIdentifier.ClusterId) + restConfig, err := impl.GetRestConfigByClusterId(request.AppIdentifier.ClusterId) if err != nil { impl.logger.Errorw("error in getting rest config by cluster Id", "err", err, "clusterId", request.AppIdentifier.ClusterId) return nil, err @@ -113,7 +115,7 @@ func (impl *K8sApplicationServiceImpl) CreateResource(request *ResourceRequestBe func (impl *K8sApplicationServiceImpl) UpdateResource(request *ResourceRequestBean) (*application.ManifestResponse, error) { //getting rest config by clusterId - restConfig, err := impl.getRestConfigByClusterId(request.AppIdentifier.ClusterId) + restConfig, err := impl.GetRestConfigByClusterId(request.AppIdentifier.ClusterId) if err != nil { impl.logger.Errorw("error in getting rest config by cluster Id", "err", err, "clusterId", request.AppIdentifier.ClusterId) return nil, err @@ -128,7 +130,7 @@ func (impl *K8sApplicationServiceImpl) UpdateResource(request *ResourceRequestBe func (impl *K8sApplicationServiceImpl) DeleteResource(request *ResourceRequestBean) (*application.ManifestResponse, error) { //getting rest config by clusterId - restConfig, err := impl.getRestConfigByClusterId(request.AppIdentifier.ClusterId) + restConfig, err := impl.GetRestConfigByClusterId(request.AppIdentifier.ClusterId) if err != nil { impl.logger.Errorw("error in getting rest config by cluster Id", "err", err, "clusterId", request.AppIdentifier.ClusterId) 
return nil, err @@ -143,7 +145,7 @@ func (impl *K8sApplicationServiceImpl) DeleteResource(request *ResourceRequestBe func (impl *K8sApplicationServiceImpl) ListEvents(request *ResourceRequestBean) (*application.EventsResponse, error) { //getting rest config by clusterId - restConfig, err := impl.getRestConfigByClusterId(request.AppIdentifier.ClusterId) + restConfig, err := impl.GetRestConfigByClusterId(request.AppIdentifier.ClusterId) if err != nil { impl.logger.Errorw("error in getting rest config by cluster Id", "err", err, "clusterId", request.AppIdentifier.ClusterId) return nil, err @@ -158,7 +160,7 @@ func (impl *K8sApplicationServiceImpl) ListEvents(request *ResourceRequestBean) func (impl *K8sApplicationServiceImpl) GetPodLogs(request *ResourceRequestBean) (io.ReadCloser, error) { //getting rest config by clusterId - restConfig, err := impl.getRestConfigByClusterId(request.AppIdentifier.ClusterId) + restConfig, err := impl.GetRestConfigByClusterId(request.AppIdentifier.ClusterId) if err != nil { impl.logger.Errorw("error in getting rest config by cluster Id", "err", err, "clusterId", request.AppIdentifier.ClusterId) return nil, err @@ -171,7 +173,7 @@ func (impl *K8sApplicationServiceImpl) GetPodLogs(request *ResourceRequestBean) return resp, nil } -func (impl *K8sApplicationServiceImpl) getRestConfigByClusterId(clusterId int) (*rest.Config, error) { +func (impl *K8sApplicationServiceImpl) GetRestConfigByClusterId(clusterId int) (*rest.Config, error) { cluster, err := impl.clusterService.FindById(clusterId) if err != nil { impl.logger.Errorw("error in getting cluster by ID", "err", err, "clusterId") @@ -192,6 +194,23 @@ func (impl *K8sApplicationServiceImpl) getRestConfigByClusterId(clusterId int) ( return restConfig, nil } +func (impl *K8sApplicationServiceImpl) GetRestConfigByCluster(cluster *cluster.ClusterBean) (*rest.Config, error) { + configMap := cluster.Config + bearerToken := configMap["bearer_token"] + var restConfig *rest.Config + var err error + if cluster.ClusterName == DEFAULT_CLUSTER && len(bearerToken) == 0 { + restConfig, err = rest.InClusterConfig() + if err != nil { + impl.logger.Errorw("error in getting rest config for default cluster", "err", err) + return nil, err + } + } else { + restConfig = &rest.Config{Host: cluster.ServerUrl, BearerToken: bearerToken, TLSClientConfig: rest.TLSClientConfig{Insecure: true}} + } + return restConfig, nil +} + func (impl *K8sApplicationServiceImpl) ValidateResourceRequest(appIdentifier *client.AppIdentifier, request *application.K8sRequestBean) (bool, error) { app, err := impl.helmAppService.GetApplicationDetail(context.Background(), appIdentifier) if err != nil { diff --git a/util/k8s/k8sCapacityRestHandler.go b/util/k8s/k8sCapacityRestHandler.go new file mode 100644 index 0000000000..50a2081f75 --- /dev/null +++ b/util/k8s/k8sCapacityRestHandler.go @@ -0,0 +1,262 @@ +package k8s + +import ( + "encoding/json" + "errors" + "github.com/devtron-labs/devtron/api/restHandler/common" + "github.com/devtron-labs/devtron/pkg/cluster" + "github.com/devtron-labs/devtron/pkg/user" + "github.com/devtron-labs/devtron/pkg/user/casbin" + "github.com/gorilla/mux" + "go.uber.org/zap" + "net/http" + "strconv" + "strings" +) + +type K8sCapacityRestHandler interface { + GetClusterList(w http.ResponseWriter, r *http.Request) + GetClusterDetail(w http.ResponseWriter, r *http.Request) + GetNodeList(w http.ResponseWriter, r *http.Request) + GetNodeDetail(w http.ResponseWriter, r *http.Request) + UpdateNodeManifest(w http.ResponseWriter, r 
*http.Request) +} +type K8sCapacityRestHandlerImpl struct { + logger *zap.SugaredLogger + k8sCapacityService K8sCapacityService + userService user.UserService + enforcer casbin.Enforcer + clusterService cluster.ClusterService + environmentService cluster.EnvironmentService +} + +func NewK8sCapacityRestHandlerImpl(logger *zap.SugaredLogger, + k8sCapacityService K8sCapacityService, userService user.UserService, + enforcer casbin.Enforcer, + clusterService cluster.ClusterService, + environmentService cluster.EnvironmentService) *K8sCapacityRestHandlerImpl { + return &K8sCapacityRestHandlerImpl{ + logger: logger, + k8sCapacityService: k8sCapacityService, + userService: userService, + enforcer: enforcer, + clusterService: clusterService, + environmentService: environmentService, + } +} + +func (handler *K8sCapacityRestHandlerImpl) GetClusterList(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + token := r.Header.Get("token") + clusters, err := handler.clusterService.FindAll() + if err != nil { + handler.logger.Errorw("error in getting all clusters", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + // RBAC enforcer applying + var authenticatedClusters []*cluster.ClusterBean + for _, cluster := range clusters { + authenticated, err := handler.CheckRbacForCluster(cluster, token) + if err != nil { + handler.logger.Errorw("error in checking rbac for cluster", "err", err, "clusterId", cluster.Id) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + if authenticated { + authenticatedClusters = append(authenticatedClusters, cluster) + } + } + if len(authenticatedClusters) == 0 { + common.WriteJsonResp(w, errors.New("unauthorized"), nil, http.StatusForbidden) + return + } + clusterDetailList, err := handler.k8sCapacityService.GetClusterCapacityDetailList(authenticatedClusters) + if err != nil { + handler.logger.Errorw("error in getting cluster capacity detail list", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + common.WriteJsonResp(w, nil, clusterDetailList, http.StatusOK) +} + +func (handler *K8sCapacityRestHandlerImpl) GetClusterDetail(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + clusterId, err := strconv.Atoi(vars["clusterId"]) + if err != nil { + handler.logger.Errorw("request err, GetClusterDetail", "err", err, "clusterId", clusterId) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + token := r.Header.Get("token") + // RBAC enforcer applying + cluster, err := handler.clusterService.FindById(clusterId) + if err != nil { + handler.logger.Errorw("error in getting cluster by id", "err", err, "clusterId", clusterId) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + authenticated, err := handler.CheckRbacForCluster(cluster, token) + if err != nil { + handler.logger.Errorw("error in checking rbac for cluster", "err", err, "clusterId", clusterId) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + if !authenticated { + common.WriteJsonResp(w, errors.New("unauthorized"), nil, http.StatusForbidden) + return + } + clusterDetail, err 
:= handler.k8sCapacityService.GetClusterCapacityDetail(cluster, false) + if err != nil { + handler.logger.Errorw("error in getting cluster capacity detail", "err", err, "clusterId", clusterId) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + common.WriteJsonResp(w, nil, clusterDetail, http.StatusOK) +} + +func (handler *K8sCapacityRestHandlerImpl) GetNodeList(w http.ResponseWriter, r *http.Request) { + vars := r.URL.Query() + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + clusterId, err := strconv.Atoi(vars.Get("clusterId")) + if err != nil { + handler.logger.Errorw("request err, GetNodeList", "err", err, "clusterId", clusterId) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + // RBAC enforcer applying + token := r.Header.Get("token") + cluster, err := handler.clusterService.FindById(clusterId) + if err != nil { + handler.logger.Errorw("error in getting cluster by id", "err", err, "clusterId", clusterId) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + authenticated, err := handler.CheckRbacForCluster(cluster, token) + if err != nil { + handler.logger.Errorw("error in checking rbac for cluster", "err", err, "clusterId", clusterId) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + if !authenticated { + common.WriteJsonResp(w, errors.New("unauthorized"), nil, http.StatusForbidden) + return + } + nodeList, err := handler.k8sCapacityService.GetNodeCapacityDetailsListByCluster(cluster) + if err != nil { + handler.logger.Errorw("error in getting node detail list by cluster", "err", err, "clusterId", clusterId) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + common.WriteJsonResp(w, nil, nodeList, http.StatusOK) +} + +func (handler *K8sCapacityRestHandlerImpl) GetNodeDetail(w http.ResponseWriter, r *http.Request) { + vars := r.URL.Query() + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + clusterId, err := strconv.Atoi(vars.Get("clusterId")) + if err != nil { + handler.logger.Errorw("request err, GetNodeDetail", "err", err, "clusterId", clusterId) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + name := vars.Get("name") + if err != nil { + handler.logger.Errorw("request err, GetNodeDetail", "err", err, "clusterId", clusterId) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + // RBAC enforcer applying + token := r.Header.Get("token") + cluster, err := handler.clusterService.FindById(clusterId) + if err != nil { + handler.logger.Errorw("error in getting cluster by id", "err", err, "clusterId", clusterId) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + authenticated, err := handler.CheckRbacForCluster(cluster, token) + if err != nil { + handler.logger.Errorw("error in checking rbac for cluster", "err", err, "clusterId", clusterId) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + if !authenticated { + common.WriteJsonResp(w, errors.New("unauthorized"), nil, http.StatusForbidden) + return + } + nodeDetail, err := handler.k8sCapacityService.GetNodeCapacityDetailByNameAndCluster(cluster, name) + if err != nil { + handler.logger.Errorw("error in getting node detail by 
cluster", "err", err, "clusterId", clusterId) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + common.WriteJsonResp(w, nil, nodeDetail, http.StatusOK) +} + +func (handler *K8sCapacityRestHandlerImpl) UpdateNodeManifest(w http.ResponseWriter, r *http.Request) { + decoder := json.NewDecoder(r.Body) + var manifestUpdateReq NodeManifestUpdateDto + err := decoder.Decode(&manifestUpdateReq) + if err != nil { + handler.logger.Errorw("error in decoding request body", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + // RBAC enforcer applying + token := r.Header.Get("token") + if ok := handler.enforcer.Enforce(token, casbin.ResourceGlobal, casbin.ActionUpdate, "*"); !ok { + common.WriteJsonResp(w, errors.New("unauthorized"), nil, http.StatusForbidden) + return + } + updatedManifest, err := handler.k8sCapacityService.UpdateNodeManifest(&manifestUpdateReq) + if err != nil { + handler.logger.Errorw("error in updating node manifest", "err", err, "updateRequest", manifestUpdateReq) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + common.WriteJsonResp(w, nil, updatedManifest, http.StatusOK) +} + +func (handler *K8sCapacityRestHandlerImpl) CheckRbacForCluster(cluster *cluster.ClusterBean, token string) (authenticated bool, err error) { + //getting all environments for this cluster + envs, err := handler.environmentService.GetByClusterId(cluster.Id) + if err != nil { + handler.logger.Errorw("error in getting environments by clusterId", "err", err, "clusterId", cluster.Id) + return false, err + } + if len(envs) == 0 { + if ok := handler.enforcer.Enforce(token, casbin.ResourceGlobal, casbin.ActionGet, "*"); !ok { + return false, nil + } + return true, nil + } + for _, env := range envs { + if ok := handler.enforcer.Enforce(token, casbin.ResourceGlobalEnvironment, casbin.ActionGet, strings.ToLower(env.EnvironmentIdentifier)); ok { + //if user has view permission to even one environment of this cluster, authorise the request + return true, nil + } + } + return false, nil +} diff --git a/util/k8s/k8sCapacityRouter.go b/util/k8s/k8sCapacityRouter.go new file mode 100644 index 0000000000..fffd8a644a --- /dev/null +++ b/util/k8s/k8sCapacityRouter.go @@ -0,0 +1,36 @@ +package k8s + +import ( + "github.com/gorilla/mux" +) + +type K8sCapacityRouter interface { + InitK8sCapacityRouter(helmRouter *mux.Router) +} +type K8sCapacityRouterImpl struct { + k8sCapacityRestHandler K8sCapacityRestHandler +} + +func NewK8sCapacityRouterImpl(k8sCapacityRestHandler K8sCapacityRestHandler) *K8sCapacityRouterImpl { + return &K8sCapacityRouterImpl{ + k8sCapacityRestHandler: k8sCapacityRestHandler, + } +} + +func (impl *K8sCapacityRouterImpl) InitK8sCapacityRouter(k8sCapacityRouter *mux.Router) { + + k8sCapacityRouter.Path("/cluster/list"). + HandlerFunc(impl.k8sCapacityRestHandler.GetClusterList).Methods("GET") + + k8sCapacityRouter.Path("/cluster/{clusterId}"). + HandlerFunc(impl.k8sCapacityRestHandler.GetClusterDetail).Methods("GET") + + k8sCapacityRouter.Path("/node/list"). + HandlerFunc(impl.k8sCapacityRestHandler.GetNodeList).Methods("GET") + + k8sCapacityRouter.Path("/node"). + HandlerFunc(impl.k8sCapacityRestHandler.GetNodeDetail).Methods("GET") + + k8sCapacityRouter.Path("/node"). 
+ HandlerFunc(impl.k8sCapacityRestHandler.UpdateNodeManifest).Methods("PUT") +} diff --git a/util/k8s/k8sCapacityService.go b/util/k8s/k8sCapacityService.go new file mode 100644 index 0000000000..e35b420b5a --- /dev/null +++ b/util/k8s/k8sCapacityService.go @@ -0,0 +1,667 @@ +package k8s + +import ( + "context" + "fmt" + "github.com/devtron-labs/devtron/client/k8s/application" + "github.com/devtron-labs/devtron/pkg/cluster" + "go.uber.org/zap" + metav1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/duration" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + resourcehelper "k8s.io/kubectl/pkg/util/resource" + metrics "k8s.io/metrics/pkg/client/clientset/versioned" + "strings" + "time" +) + +const ( + labelNodeRolePrefix = "node-role.kubernetes.io/" + nodeLabelRole = "kubernetes.io/role" + Kibibyte = 1024 + Mebibyte = 1024 * 1024 + Gibibyte = 1024 * 1024 * 1024 + kilobyte = 1000 + Megabyte = 1000 * 1000 + Gigabyte = 1000 * 1000 * 1000 +) + +type K8sCapacityService interface { + GetClusterCapacityDetailList(clusters []*cluster.ClusterBean) ([]*ClusterCapacityDetail, error) + GetClusterCapacityDetail(cluster *cluster.ClusterBean, callForList bool) (*ClusterCapacityDetail, error) + GetNodeCapacityDetailsListByCluster(cluster *cluster.ClusterBean) ([]*NodeCapacityDetail, error) + GetNodeCapacityDetailByNameAndCluster(cluster *cluster.ClusterBean, name string) (*NodeCapacityDetail, error) + UpdateNodeManifest(request *NodeManifestUpdateDto) (*application.ManifestResponse, error) +} +type K8sCapacityServiceImpl struct { + logger *zap.SugaredLogger + clusterService cluster.ClusterService + k8sApplicationService K8sApplicationService + k8sClientService application.K8sClientService + clusterCronService ClusterCronService +} + +func NewK8sCapacityServiceImpl(Logger *zap.SugaredLogger, + clusterService cluster.ClusterService, + k8sApplicationService K8sApplicationService, + k8sClientService application.K8sClientService, + clusterCronService ClusterCronService) *K8sCapacityServiceImpl { + return &K8sCapacityServiceImpl{ + logger: Logger, + clusterService: clusterService, + k8sApplicationService: k8sApplicationService, + k8sClientService: k8sClientService, + clusterCronService: clusterCronService, + } +} + +func (impl *K8sCapacityServiceImpl) GetClusterCapacityDetailList(clusters []*cluster.ClusterBean) ([]*ClusterCapacityDetail, error) { + var clustersDetails []*ClusterCapacityDetail + for _, cluster := range clusters { + clusterCapacityDetail := &ClusterCapacityDetail{} + var err error + if len(cluster.ErrorInConnecting) > 0 { + clusterCapacityDetail.ErrorInConnection = cluster.ErrorInConnecting + } else { + clusterCapacityDetail, err = impl.GetClusterCapacityDetail(cluster, true) + if err != nil { + impl.logger.Errorw("error in getting cluster capacity details by id", "err", err) + clusterCapacityDetail = &ClusterCapacityDetail{ + ErrorInConnection: err.Error(), + } + } + } + clusterCapacityDetail.Id = cluster.Id + clusterCapacityDetail.Name = cluster.ClusterName + clustersDetails = append(clustersDetails, clusterCapacityDetail) + } + return clustersDetails, nil +} + +func (impl *K8sCapacityServiceImpl) GetClusterCapacityDetail(cluster *cluster.ClusterBean, callForList bool) (*ClusterCapacityDetail, error) { + //getting rest config by clusterId + restConfig, err := impl.k8sApplicationService.GetRestConfigByCluster(cluster) 
+ if err != nil { + impl.logger.Errorw("error in getting rest config by cluster", "err", err, "clusterId", cluster.Id) + return nil, err + } + //getting kubernetes clientSet by rest config + k8sClientSet, err := kubernetes.NewForConfig(restConfig) + if err != nil { + impl.logger.Errorw("error in getting client set by rest config", "err", err, "restConfig", restConfig) + return nil, err + } + clusterDetail := &ClusterCapacityDetail{} + nodeList, err := k8sClientSet.CoreV1().Nodes().List(context.Background(), v1.ListOptions{}) + if err != nil { + impl.logger.Errorw("error in getting node list", "err", err, "clusterId", cluster.Id) + return nil, err + } + var clusterCpuCapacity resource.Quantity + var clusterMemoryCapacity resource.Quantity + var clusterCpuAllocatable resource.Quantity + var clusterMemoryAllocatable resource.Quantity + nodeCount := 0 + nodesK8sVersionMap := make(map[string]bool) + //map of node condition and name of all nodes that condition is true on + nodeErrors := make(map[metav1.NodeConditionType][]string) + var nodesK8sVersion []string + for _, node := range nodeList.Items { + errorsInNode := findNodeErrors(&node) + for conditionName := range errorsInNode { + if nodeNames, ok := nodeErrors[conditionName]; ok { + nodeNames = append(nodeNames, node.Name) + nodeErrors[conditionName] = nodeNames + } else { + nodeErrors[conditionName] = []string{node.Name} + } + } + nodeCount += 1 + if _, ok := nodesK8sVersionMap[node.Status.NodeInfo.KubeletVersion]; !ok { + nodesK8sVersionMap[node.Status.NodeInfo.KubeletVersion] = true + nodesK8sVersion = append(nodesK8sVersion, node.Status.NodeInfo.KubeletVersion) + } + clusterCpuCapacity.Add(node.Status.Capacity[metav1.ResourceCPU]) + clusterMemoryCapacity.Add(node.Status.Capacity[metav1.ResourceMemory]) + clusterCpuAllocatable.Add(node.Status.Allocatable[metav1.ResourceCPU]) + clusterMemoryAllocatable.Add(node.Status.Allocatable[metav1.ResourceMemory]) + } + clusterDetail.NodeErrors = nodeErrors + clusterDetail.NodeK8sVersions = nodesK8sVersion + clusterDetail.Cpu = &ResourceDetailObject{ + Capacity: getResourceString(clusterCpuCapacity, metav1.ResourceCPU), + } + clusterDetail.Memory = &ResourceDetailObject{ + Capacity: getResourceString(clusterMemoryCapacity, metav1.ResourceMemory), + } + if callForList { + //assigning additional data for cluster listing api call + clusterDetail.NodeCount = nodeCount + //getting serverVersion + serverVersion, err := k8sClientSet.DiscoveryClient.ServerVersion() + if err != nil { + impl.logger.Errorw("error in getting server version", "err", err, "clusterId", cluster.Id) + return nil, err + } + clusterDetail.ServerVersion = serverVersion.GitVersion + } else { + //update data for cluster detail api call + //getting metrics clientSet by rest config + metricsClientSet, err := metrics.NewForConfig(restConfig) + if err != nil { + impl.logger.Errorw("error in getting metrics client set", "err", err) + return nil, err + } + //empty namespace: get pods for all namespaces + podList, err := k8sClientSet.CoreV1().Pods("").List(context.Background(), v1.ListOptions{}) + if err != nil { + impl.logger.Errorw("error in getting pod list", "err", err) + return nil, err + } + var clusterCpuUsage resource.Quantity + var clusterMemoryUsage resource.Quantity + var clusterCpuLimits resource.Quantity + var clusterCpuRequests resource.Quantity + var clusterMemoryLimits resource.Quantity + var clusterMemoryRequests resource.Quantity + nmList, err := metricsClientSet.MetricsV1beta1().NodeMetricses().List(context.Background(), 
v1.ListOptions{}) + if err != nil { + impl.logger.Errorw("error in getting nodeMetrics list", "err", err) + } else if nmList != nil { + for _, nm := range nmList.Items { + clusterCpuUsage.Add(nm.Usage[metav1.ResourceCPU]) + clusterMemoryUsage.Add(nm.Usage[metav1.ResourceMemory]) + } + clusterDetail.Cpu.UsagePercentage = convertToPercentage(&clusterCpuUsage, &clusterCpuAllocatable) + clusterDetail.Memory.UsagePercentage = convertToPercentage(&clusterMemoryUsage, &clusterMemoryAllocatable) + } + for _, pod := range podList.Items { + if pod.Status.Phase != metav1.PodSucceeded && pod.Status.Phase != metav1.PodFailed { + requests, limits := resourcehelper.PodRequestsAndLimits(&pod) + clusterCpuLimits.Add(limits[metav1.ResourceCPU]) + clusterCpuRequests.Add(requests[metav1.ResourceCPU]) + clusterMemoryLimits.Add(limits[metav1.ResourceMemory]) + clusterMemoryRequests.Add(requests[metav1.ResourceMemory]) + } + } + clusterDetail.Cpu.RequestPercentage = convertToPercentage(&clusterCpuRequests, &clusterCpuAllocatable) + clusterDetail.Cpu.LimitPercentage = convertToPercentage(&clusterCpuLimits, &clusterCpuAllocatable) + clusterDetail.Memory.RequestPercentage = convertToPercentage(&clusterMemoryRequests, &clusterMemoryAllocatable) + clusterDetail.Memory.LimitPercentage = convertToPercentage(&clusterMemoryLimits, &clusterMemoryAllocatable) + } + return clusterDetail, nil +} + +func (impl *K8sCapacityServiceImpl) GetNodeCapacityDetailsListByCluster(cluster *cluster.ClusterBean) ([]*NodeCapacityDetail, error) { + //getting rest config by clusterId + restConfig, err := impl.k8sApplicationService.GetRestConfigByCluster(cluster) + if err != nil { + impl.logger.Errorw("error in getting rest config by cluster", "err", err, "clusterId", cluster.Id) + return nil, err + } + //getting kubernetes clientSet by rest config + k8sClientSet, err := kubernetes.NewForConfig(restConfig) + if err != nil { + impl.logger.Errorw("error in getting client set by rest config", "err", err, "restConfig", restConfig) + return nil, err + } + //getting metrics clientSet by rest config + metricsClientSet, err := metrics.NewForConfig(restConfig) + if err != nil { + impl.logger.Errorw("error in getting metrics client set", "err", err) + return nil, err + } + nodeMetricsList, err := metricsClientSet.MetricsV1beta1().NodeMetricses().List(context.Background(), v1.ListOptions{}) + if err != nil { + impl.logger.Errorw("error in getting node metrics", "err", err) + } + nodeList, err := k8sClientSet.CoreV1().Nodes().List(context.Background(), v1.ListOptions{}) + if err != nil { + impl.logger.Errorw("error in getting node list", "err", err, "clusterId", cluster.Id) + return nil, err + } + //empty namespace: get pods for all namespaces + podList, err := k8sClientSet.CoreV1().Pods("").List(context.Background(), v1.ListOptions{}) + if err != nil { + impl.logger.Errorw("error in getting pod list", "err", err) + return nil, err + } + nodeResourceUsage := make(map[string]metav1.ResourceList) + if nodeMetricsList != nil { + for _, nodeMetrics := range nodeMetricsList.Items { + nodeResourceUsage[nodeMetrics.Name] = nodeMetrics.Usage + } + } + var nodeDetails []*NodeCapacityDetail + for _, node := range nodeList.Items { + nodeDetail, err := impl.getNodeDetail(&node, nodeResourceUsage, podList, true, restConfig) + if err != nil { + impl.logger.Errorw("error in getting node detail for list", "err", err) + return nil, err + } + nodeDetails = append(nodeDetails, nodeDetail) + } + return nodeDetails, nil +} + +func (impl *K8sCapacityServiceImpl) 
GetNodeCapacityDetailByNameAndCluster(cluster *cluster.ClusterBean, name string) (*NodeCapacityDetail, error) { + //getting rest config by clusterId + restConfig, err := impl.k8sApplicationService.GetRestConfigByCluster(cluster) + if err != nil { + impl.logger.Errorw("error in getting rest config by cluster", "err", err, "clusterId", cluster.Id) + return nil, err + } + //getting kubernetes clientSet by rest config + k8sClientSet, err := kubernetes.NewForConfig(restConfig) + if err != nil { + impl.logger.Errorw("error in getting client set by rest config", "err", err, "restConfig", restConfig) + return nil, err + } + //getting metrics clientSet by rest config + metricsClientSet, err := metrics.NewForConfig(restConfig) + if err != nil { + impl.logger.Errorw("error in getting metrics client set", "err", err) + return nil, err + } + nodeMetrics, err := metricsClientSet.MetricsV1beta1().NodeMetricses().Get(context.Background(), name, v1.GetOptions{}) + if err != nil { + impl.logger.Errorw("error in getting node metrics", "err", err) + } + node, err := k8sClientSet.CoreV1().Nodes().Get(context.Background(), name, v1.GetOptions{}) + if err != nil { + impl.logger.Errorw("error in getting node list", "err", err) + return nil, err + } + //empty namespace: get pods for all namespaces + podList, err := k8sClientSet.CoreV1().Pods("").List(context.Background(), v1.ListOptions{}) + if err != nil { + impl.logger.Errorw("error in getting pod list", "err", err) + return nil, err + } + nodeResourceUsage := make(map[string]metav1.ResourceList) + if nodeMetrics != nil { + nodeResourceUsage[nodeMetrics.Name] = nodeMetrics.Usage + } + nodeDetail, err := impl.getNodeDetail(node, nodeResourceUsage, podList, false, restConfig) + if err != nil { + impl.logger.Errorw("error in getting node detail", "err", err) + return nil, err + } + //updating cluster name + nodeDetail.ClusterName = cluster.ClusterName + return nodeDetail, nil +} +func (impl *K8sCapacityServiceImpl) getNodeDetail(node *metav1.Node, nodeResourceUsage map[string]metav1.ResourceList, podList *metav1.PodList, callForList bool, restConfig *rest.Config) (*NodeCapacityDetail, error) { + cpuAllocatable := node.Status.Allocatable[metav1.ResourceCPU] + memoryAllocatable := node.Status.Allocatable[metav1.ResourceMemory] + podCount := 0 + nodeRequestsResourceList := make(metav1.ResourceList) + nodeLimitsResourceList := make(metav1.ResourceList) + var podDetailList []*PodCapacityDetail + for _, pod := range podList.Items { + if pod.Spec.NodeName == node.Name { + if callForList { + podCount++ + } else { + var requests, limits metav1.ResourceList + if pod.Status.Phase != metav1.PodSucceeded && pod.Status.Phase != metav1.PodFailed { + requests, limits = resourcehelper.PodRequestsAndLimits(&pod) + nodeRequestsResourceList = AddTwoResourceList(nodeRequestsResourceList, requests) + nodeLimitsResourceList = AddTwoResourceList(nodeLimitsResourceList, limits) + } + podDetailList = append(podDetailList, getPodDetail(pod, cpuAllocatable, memoryAllocatable, limits, requests)) + } + } + } + var labels []*LabelAnnotationTaintObject + for k, v := range node.Labels { + labelObj := &LabelAnnotationTaintObject{ + Key: k, + Value: v, + } + labels = append(labels, labelObj) + } + nodeDetail := &NodeCapacityDetail{ + Name: node.Name, + K8sVersion: node.Status.NodeInfo.KubeletVersion, + Errors: findNodeErrors(node), + InternalIp: getNodeInternalIP(node), + ExternalIp: getNodeExternalIP(node), + Unschedulable: node.Spec.Unschedulable, + Roles: findNodeRoles(node), + Labels: labels, + 
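+		// summary fields for the node listing view; age, pod count and resource usage
+		// figures are filled in further below depending on callForList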
		Status:        findNodeStatus(node),
+		TaintCount:    len(node.Spec.Taints),
+		CreatedAt:     node.CreationTimestamp.String(),
+	}
+	nodeUsageResourceList := nodeResourceUsage[node.Name]
+	if callForList {
+		// assigning additional data for node listing api call
+		nodeDetail.Age = translateTimestampSince(node.CreationTimestamp)
+		nodeDetail.PodCount = podCount
+		cpuUsage, cpuUsageOk := nodeUsageResourceList[metav1.ResourceCPU]
+		memoryUsage, memoryUsageOk := nodeUsageResourceList[metav1.ResourceMemory]
+		nodeDetail.Cpu = &ResourceDetailObject{
+			Allocatable:        getResourceString(cpuAllocatable, metav1.ResourceCPU),
+			AllocatableInBytes: cpuAllocatable.Value(),
+		}
+		nodeDetail.Memory = &ResourceDetailObject{
+			Allocatable:        getResourceString(memoryAllocatable, metav1.ResourceMemory),
+			AllocatableInBytes: memoryAllocatable.Value(),
+		}
+		if cpuUsageOk {
+			nodeDetail.Cpu.Usage = getResourceString(cpuUsage, metav1.ResourceCPU)
+			nodeDetail.Cpu.UsageInBytes = cpuUsage.Value()
+			nodeDetail.Cpu.UsagePercentage = convertToPercentage(&cpuUsage, &cpuAllocatable)
+		}
+		if memoryUsageOk {
+			nodeDetail.Memory.Usage = getResourceString(memoryUsage, metav1.ResourceMemory)
+			nodeDetail.Memory.UsageInBytes = memoryUsage.Value()
+			nodeDetail.Memory.UsagePercentage = convertToPercentage(&memoryUsage, &memoryAllocatable)
+		}
+	} else {
+		//update data for node detail api call
+		err := impl.updateAdditionalDetailForNode(nodeDetail, node, nodeLimitsResourceList, nodeRequestsResourceList, nodeUsageResourceList, podDetailList, restConfig)
+		if err != nil {
+			impl.logger.Errorw("error in updating additional data for node detail", "err", err)
+			return nil, err
+		}
+	}
+	return nodeDetail, nil
+}
+
+func (impl *K8sCapacityServiceImpl) updateAdditionalDetailForNode(nodeDetail *NodeCapacityDetail, node *metav1.Node,
+	nodeLimitsResourceList metav1.ResourceList, nodeRequestsResourceList metav1.ResourceList,
+	nodeUsageResourceList metav1.ResourceList, podDetailList []*PodCapacityDetail, restConfig *rest.Config) error {
+	nodeDetail.Version = "v1"
+	nodeDetail.Kind = "Node"
+	nodeDetail.Pods = podDetailList
+	var annotations []*LabelAnnotationTaintObject
+	for k, v := range node.Annotations {
+		annotationObj := &LabelAnnotationTaintObject{
+			Key:   k,
+			Value: v,
+		}
+		annotations = append(annotations, annotationObj)
+	}
+	nodeDetail.Annotations = annotations
+
+	var taints []*LabelAnnotationTaintObject
+	for _, taint := range node.Spec.Taints {
+		taintObj := &LabelAnnotationTaintObject{
+			Key:    taint.Key,
+			Value:  taint.Value,
+			Effect: string(taint.Effect),
+		}
+		taints = append(taints, taintObj)
+	}
+	nodeDetail.Taints = taints
+	//map of {conditionType: isErrorCondition}; keep this in sync with the node condition types defined by kubernetes
+	NodeAllConditionsMap := map[metav1.NodeConditionType]bool{metav1.NodeReady: false, metav1.NodeMemoryPressure: true,
+		metav1.NodeDiskPressure: true, metav1.NodeNetworkUnavailable: true, metav1.NodePIDPressure: true}
+	var conditions []*NodeConditionObject
+	for _, condition := range node.Status.Conditions {
+		if isErrorCondition, ok := NodeAllConditionsMap[condition.Type]; ok {
+			conditionObj := &NodeConditionObject{
+				Type:    string(condition.Type),
+				Reason:  condition.Reason,
+				Message: condition.Message,
+			}
+			if (!isErrorCondition && condition.Status == metav1.ConditionTrue) || (isErrorCondition && condition.Status == metav1.ConditionFalse) {
+				conditionObj.HaveIssue = false
+			} else {
+				conditionObj.HaveIssue = true
+			}
+			conditions = append(conditions, conditionObj)
+		}
+	}
+
nodeDetail.Conditions = conditions + + nodeCapacityResourceList := node.Status.Capacity + nodeAllocatableResourceList := node.Status.Allocatable + for resourceName, allocatable := range nodeAllocatableResourceList { + limits, limitsOk := nodeLimitsResourceList[resourceName] + requests, requestsOk := nodeRequestsResourceList[resourceName] + usage, usageOk := nodeUsageResourceList[resourceName] + capacity := nodeCapacityResourceList[resourceName] + r := &ResourceDetailObject{ + ResourceName: string(resourceName), + Allocatable: getResourceString(allocatable, resourceName), + Capacity: getResourceString(capacity, resourceName), + } + if limitsOk { + r.Limit = getResourceString(limits, resourceName) + r.LimitPercentage = convertToPercentage(&limits, &allocatable) + } + if requestsOk { + r.Request = getResourceString(requests, resourceName) + r.RequestPercentage = convertToPercentage(&requests, &allocatable) + } + if usageOk { + r.Usage = getResourceString(usage, resourceName) + r.UsagePercentage = convertToPercentage(&usage, &allocatable) + } + nodeDetail.Resources = append(nodeDetail.Resources, r) + } + //getting manifest + manifestRequest := &application.K8sRequestBean{ + ResourceIdentifier: application.ResourceIdentifier{ + Name: node.Name, + GroupVersionKind: schema.GroupVersionKind{ + Version: nodeDetail.Version, + Kind: nodeDetail.Kind, + }, + }, + } + manifestResponse, err := impl.k8sClientService.GetResource(restConfig, manifestRequest) + if err != nil { + impl.logger.Errorw("error in getting node manifest", "err", err) + return err + } + nodeDetail.Manifest = manifestResponse.Manifest + return nil +} + +func (impl *K8sCapacityServiceImpl) UpdateNodeManifest(request *NodeManifestUpdateDto) (*application.ManifestResponse, error) { + //getting rest config by clusterId + restConfig, err := impl.k8sApplicationService.GetRestConfigByClusterId(request.ClusterId) + if err != nil { + impl.logger.Errorw("error in getting rest config by cluster id", "err", err, "clusterId", request.ClusterId) + return nil, err + } + manifestUpdateReq := &application.K8sRequestBean{ + ResourceIdentifier: application.ResourceIdentifier{ + Name: request.Name, + GroupVersionKind: schema.GroupVersionKind{ + Group: "", + Version: request.Version, + Kind: request.Kind, + }, + }, + Patch: request.ManifestPatch, + } + manifestResponse, err := impl.k8sClientService.UpdateResource(restConfig, manifestUpdateReq) + if err != nil { + impl.logger.Errorw("error in updating node manifest", "err", err) + return nil, err + } + return manifestResponse, nil +} +func getPodDetail(pod metav1.Pod, cpuAllocatable resource.Quantity, memoryAllocatable resource.Quantity, limits metav1.ResourceList, requests metav1.ResourceList) *PodCapacityDetail { + cpuLimits, cpuLimitsOk := limits[metav1.ResourceCPU] + cpuRequests, cpuRequestsOk := requests[metav1.ResourceCPU] + memoryLimits, memoryLimitsOk := limits[metav1.ResourceMemory] + memoryRequests, memoryRequestsOk := requests[metav1.ResourceMemory] + podDetail := &PodCapacityDetail{ + Name: pod.Name, + Namespace: pod.Namespace, + Age: translateTimestampSince(pod.CreationTimestamp), + CreatedAt: pod.CreationTimestamp.String(), + Cpu: &ResourceDetailObject{ + Limit: getResourceString(cpuLimits, metav1.ResourceCPU), + Request: getResourceString(cpuRequests, metav1.ResourceCPU), + }, + Memory: &ResourceDetailObject{ + Limit: getResourceString(memoryLimits, metav1.ResourceMemory), + Request: getResourceString(memoryRequests, metav1.ResourceMemory), + }, + } + if cpuLimitsOk { + 
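+		// the pod level limit/request percentages below are computed against the node's
+		// allocatable cpu and memory, via the same convertToPercentage helper used for
+		// the node and cluster level figures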
podDetail.Cpu.LimitPercentage = convertToPercentage(&cpuLimits, &cpuAllocatable) + } + if cpuRequestsOk { + podDetail.Cpu.RequestPercentage = convertToPercentage(&cpuRequests, &cpuAllocatable) + } + if memoryLimitsOk { + podDetail.Memory.LimitPercentage = convertToPercentage(&memoryLimits, &memoryAllocatable) + } + if memoryRequestsOk { + podDetail.Memory.RequestPercentage = convertToPercentage(&memoryRequests, &memoryAllocatable) + } + return podDetail +} +func convertToPercentage(actual, allocatable *resource.Quantity) string { + if actual == nil || allocatable == nil { + return "" + } + utilPercent := float64(0) + if allocatable.MilliValue() > 0 { + utilPercent = float64(actual.MilliValue()) / float64(allocatable.MilliValue()) * 100 + } + return fmt.Sprintf("%d%%", int64(utilPercent)) +} + +func getResourceString(quantity resource.Quantity, resourceName metav1.ResourceName) string { + standardResources := map[metav1.ResourceName]bool{metav1.ResourceCPU: true, metav1.ResourceMemory: true, metav1.ResourceStorage: true, metav1.ResourceEphemeralStorage: true} + + if _, ok := standardResources[resourceName]; !ok { + //not a standard resource, we do not know if conversion would be valid or not + //for example - pods: "250", this is not in bytes but an integer so conversion is invalid + return quantity.String() + } else { + var quantityStr string + value := quantity.Value() + valueGi := value / Gibibyte + //allowing remainder 0 only, because for Gi rounding off will be highly erroneous + if valueGi > 1 && value%Gibibyte == 0 { + quantityStr = fmt.Sprintf("%dGi", valueGi) + } else { + valueMi := value / Mebibyte + if valueMi > 10 { + if value%Mebibyte != 0 { + valueMi++ + } + quantityStr = fmt.Sprintf("%dMi", valueMi) + } else if value > 1000 { + valueKi := value / Kibibyte + if value%Kibibyte != 0 { + valueKi++ + } + quantityStr = fmt.Sprintf("%dKi", valueKi) + } else { + quantityStr = fmt.Sprintf("%dm", quantity.MilliValue()) + } + } + return quantityStr + } +} + +func translateTimestampSince(timestamp v1.Time) string { + if timestamp.IsZero() { + return "" + } + return duration.HumanDuration(time.Since(timestamp.Time)) +} + +func findNodeRoles(node *metav1.Node) []string { + roles := sets.NewString() + for k, v := range node.Labels { + switch { + case strings.HasPrefix(k, labelNodeRolePrefix): + if role := strings.TrimPrefix(k, labelNodeRolePrefix); len(role) > 0 { + roles.Insert(role) + } + case k == nodeLabelRole && v != "": + roles.Insert(v) + } + } + if roles.Len() > 0 { + return roles.List() + } else { + return []string{"none"} + } +} + +func findNodeStatus(node *metav1.Node) string { + conditionMap := make(map[metav1.NodeConditionType]*metav1.NodeCondition) + //Valid conditions to be updated with update at kubernetes end + NodeAllValidConditions := []metav1.NodeConditionType{metav1.NodeReady} + for _, condition := range node.Status.Conditions { + conditionMap[condition.Type] = &condition + } + var status string + for _, validCondition := range NodeAllValidConditions { + if condition, ok := conditionMap[validCondition]; ok { + if condition.Status == metav1.ConditionTrue { + status = string(condition.Type) + } else { + status = fmt.Sprintf("Not %s", string(condition.Type)) + } + } + } + if len(status) == 0 { + status = "Unknown" + } + return status +} + +func findNodeErrors(node *metav1.Node) map[metav1.NodeConditionType]string { + conditionMap := make(map[metav1.NodeConditionType]metav1.NodeCondition) + NodeAllErrorConditions := []metav1.NodeConditionType{metav1.NodeMemoryPressure, 
metav1.NodeDiskPressure, metav1.NodeNetworkUnavailable, metav1.NodePIDPressure} + for _, condition := range node.Status.Conditions { + conditionMap[condition.Type] = condition + } + conditionErrorMap := make(map[metav1.NodeConditionType]string) + for _, errorCondition := range NodeAllErrorConditions { + if condition, ok := conditionMap[errorCondition]; ok { + //todo: update from true to false + if condition.Status == metav1.ConditionFalse { + conditionErrorMap[condition.Type] = condition.Message + } + } + } + return conditionErrorMap +} + +func getNodeExternalIP(node *metav1.Node) string { + for _, address := range node.Status.Addresses { + if address.Type == metav1.NodeExternalIP { + return address.Address + } + } + return "none" +} + +func getNodeInternalIP(node *metav1.Node) string { + for _, address := range node.Status.Addresses { + if address.Type == metav1.NodeInternalIP { + return address.Address + } + } + return "none" +} + +func AddTwoResourceList(oldResourceList metav1.ResourceList, newResourceList metav1.ResourceList) metav1.ResourceList { + for res, quantity := range newResourceList { + if oldQuantity, ok1 := oldResourceList[res]; ok1 { + quantity.Add(oldQuantity) + } + oldResourceList[res] = quantity + } + return oldResourceList +} diff --git a/util/k8s/wire_k8sApp.go b/util/k8s/wire_k8sApp.go index 597d878124..1cedfb2e72 100644 --- a/util/k8s/wire_k8sApp.go +++ b/util/k8s/wire_k8sApp.go @@ -18,8 +18,15 @@ var K8sApplicationWireSet = wire.NewSet( wire.Bind(new(application2.K8sClientService), new(*application2.K8sClientServiceImpl)), terminal.NewTerminalSessionHandlerImpl, wire.Bind(new(terminal.TerminalSessionHandler), new(*terminal.TerminalSessionHandlerImpl)), - + NewK8sCapacityRouterImpl, + wire.Bind(new(K8sCapacityRouter), new(*K8sCapacityRouterImpl)), + NewK8sCapacityRestHandlerImpl, + wire.Bind(new(K8sCapacityRestHandler), new(*K8sCapacityRestHandlerImpl)), + NewK8sCapacityServiceImpl, + wire.Bind(new(K8sCapacityService), new(*K8sCapacityServiceImpl)), informer.NewGlobalMapClusterNamespace, informer.NewK8sInformerFactoryImpl, wire.Bind(new(informer.K8sInformerFactory), new(*informer.K8sInformerFactoryImpl)), + NewClusterCronServiceImpl, + wire.Bind(new(ClusterCronService), new(*ClusterCronServiceImpl)), ) diff --git a/vendor/k8s.io/metrics/LICENSE b/vendor/k8s.io/metrics/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/vendor/k8s.io/metrics/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/doc.go b/vendor/k8s.io/metrics/pkg/apis/metrics/doc.go new file mode 100644 index 0000000000..49e2997268 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +groupName=metrics.k8s.io + +// Package metrics defines an API for exposing metics. +package metrics // import "k8s.io/metrics/pkg/apis/metrics" diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/register.go b/vendor/k8s.io/metrics/pkg/apis/metrics/register.go new file mode 100644 index 0000000000..9384e44b91 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "metrics.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &NodeMetrics{}, + &NodeMetricsList{}, + &PodMetrics{}, + &PodMetricsList{}, + ) + return nil +} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/types.go b/vendor/k8s.io/metrics/pkg/apis/metrics/types.go new file mode 100644 index 0000000000..f1c58c7684 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/types.go @@ -0,0 +1,101 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +resourceName=nodes +// +genclient:readonly +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeMetrics sets resource usage metrics of a node. +type NodeMetrics struct { + metav1.TypeMeta + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // The following fields define time interval from which metrics were + // collected from the interval [Timestamp-Window, Timestamp]. + Timestamp metav1.Time + Window metav1.Duration + + // The memory usage is the memory working set. + Usage corev1.ResourceList +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeMetricsList is a list of NodeMetrics. +type NodeMetricsList struct { + metav1.TypeMeta + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + metav1.ListMeta + + // List of node metrics. + Items []NodeMetrics +} + +// +genclient +// +resourceName=pods +// +genclient:readonly +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodMetrics sets resource usage metrics of a pod. +type PodMetrics struct { + metav1.TypeMeta + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // The following fields define time interval from which metrics were + // collected from the interval [Timestamp-Window, Timestamp]. + Timestamp metav1.Time + Window metav1.Duration + + // Metrics for all containers are collected within the same time window. + Containers []ContainerMetrics +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodMetricsList is a list of PodMetrics. +type PodMetricsList struct { + metav1.TypeMeta + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + metav1.ListMeta + + // List of pod metrics. + Items []PodMetrics +} + +// ContainerMetrics sets resource usage metrics of a container. +type ContainerMetrics struct { + // Container name corresponding to the one from pod.spec.containers. + Name string + // The memory usage is the memory working set. + Usage corev1.ResourceList +} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/doc.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/doc.go new file mode 100644 index 0000000000..8e06b22054 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:conversion-gen=k8s.io/metrics/pkg/apis/metrics +// +k8s:openapi-gen=true +// +groupName=metrics.k8s.io + +// Package v1alpha1 is the v1alpha1 version of the metrics API. +package v1alpha1 // import "k8s.io/metrics/pkg/apis/metrics/v1alpha1" diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.pb.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.pb.go new file mode 100644 index 0000000000..19355d77f3 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.pb.go @@ -0,0 +1,1758 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + + k8s_io_api_core_v1 "k8s.io/api/core/v1" + k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" + resource "k8s.io/apimachinery/pkg/api/resource" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ContainerMetrics) Reset() { *m = ContainerMetrics{} } +func (*ContainerMetrics) ProtoMessage() {} +func (*ContainerMetrics) Descriptor() ([]byte, []int) { + return fileDescriptor_4bcbecebae081ea6, []int{0} +} +func (m *ContainerMetrics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerMetrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerMetrics.Merge(m, src) +} +func (m *ContainerMetrics) XXX_Size() int { + return m.Size() +} +func (m *ContainerMetrics) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerMetrics.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerMetrics proto.InternalMessageInfo + +func (m *NodeMetrics) Reset() { *m = NodeMetrics{} } +func (*NodeMetrics) ProtoMessage() {} +func (*NodeMetrics) Descriptor() ([]byte, []int) { + return fileDescriptor_4bcbecebae081ea6, []int{1} +} +func (m *NodeMetrics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeMetrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeMetrics.Merge(m, src) +} +func (m *NodeMetrics) XXX_Size() int { + return m.Size() +} +func (m *NodeMetrics) XXX_DiscardUnknown() { + xxx_messageInfo_NodeMetrics.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeMetrics proto.InternalMessageInfo + +func (m *NodeMetricsList) Reset() { *m = NodeMetricsList{} } +func (*NodeMetricsList) ProtoMessage() {} +func (*NodeMetricsList) Descriptor() ([]byte, []int) { + return fileDescriptor_4bcbecebae081ea6, []int{2} +} +func (m *NodeMetricsList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeMetricsList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeMetricsList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeMetricsList.Merge(m, src) +} +func (m *NodeMetricsList) XXX_Size() int { + return m.Size() +} +func (m *NodeMetricsList) XXX_DiscardUnknown() { + xxx_messageInfo_NodeMetricsList.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeMetricsList proto.InternalMessageInfo + +func (m *PodMetrics) Reset() { *m = PodMetrics{} } +func (*PodMetrics) ProtoMessage() {} +func (*PodMetrics) Descriptor() ([]byte, []int) { + return fileDescriptor_4bcbecebae081ea6, []int{3} +} +func (m *PodMetrics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodMetrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodMetrics.Merge(m, src) +} +func (m *PodMetrics) XXX_Size() int { + return m.Size() +} +func (m *PodMetrics) XXX_DiscardUnknown() { + xxx_messageInfo_PodMetrics.DiscardUnknown(m) +} + +var xxx_messageInfo_PodMetrics proto.InternalMessageInfo + +func (m *PodMetricsList) Reset() { *m = PodMetricsList{} } +func (*PodMetricsList) ProtoMessage() {} +func (*PodMetricsList) Descriptor() ([]byte, []int) { + 
return fileDescriptor_4bcbecebae081ea6, []int{4} +} +func (m *PodMetricsList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodMetricsList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodMetricsList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodMetricsList.Merge(m, src) +} +func (m *PodMetricsList) XXX_Size() int { + return m.Size() +} +func (m *PodMetricsList) XXX_DiscardUnknown() { + xxx_messageInfo_PodMetricsList.DiscardUnknown(m) +} + +var xxx_messageInfo_PodMetricsList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ContainerMetrics)(nil), "k8s.io.metrics.pkg.apis.metrics.v1alpha1.ContainerMetrics") + proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1alpha1.ContainerMetrics.UsageEntry") + proto.RegisterType((*NodeMetrics)(nil), "k8s.io.metrics.pkg.apis.metrics.v1alpha1.NodeMetrics") + proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1alpha1.NodeMetrics.UsageEntry") + proto.RegisterType((*NodeMetricsList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1alpha1.NodeMetricsList") + proto.RegisterType((*PodMetrics)(nil), "k8s.io.metrics.pkg.apis.metrics.v1alpha1.PodMetrics") + proto.RegisterType((*PodMetricsList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1alpha1.PodMetricsList") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto", fileDescriptor_4bcbecebae081ea6) +} + +var fileDescriptor_4bcbecebae081ea6 = []byte{ + // 658 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x54, 0x41, 0x4f, 0x13, 0x41, + 0x18, 0xed, 0xd0, 0x96, 0xc0, 0x54, 0x11, 0xf7, 0x44, 0x7a, 0xd8, 0x92, 0x9e, 0x1a, 0x13, 0x66, + 0x85, 0xa0, 0x21, 0x9c, 0xcc, 0x0a, 0x07, 0x13, 0x41, 0xd9, 0xa0, 0x46, 0xf4, 0xe0, 0x74, 0x3b, + 0x6e, 0xc7, 0xb2, 0x33, 0x9b, 0x99, 0xd9, 0x92, 0xde, 0x8c, 0x7a, 0xf2, 0x64, 0xe2, 0x9f, 0xc2, + 0x78, 0xe1, 0xc8, 0x45, 0x90, 0xf5, 0xee, 0x0f, 0xf0, 0x64, 0x76, 0x3a, 0xdb, 0xad, 0x14, 0xa1, + 0x72, 0xf0, 0xc4, 0x6d, 0xf7, 0x9b, 0x79, 0xef, 0x7d, 0xf3, 0xbe, 0x37, 0x03, 0xb7, 0x3a, 0x2b, + 0x12, 0x51, 0xee, 0x74, 0xe2, 0x26, 0x11, 0x8c, 0x28, 0x22, 0x9d, 0x2e, 0x61, 0x2d, 0x2e, 0x1c, + 0xb3, 0x10, 0x12, 0x25, 0xa8, 0x2f, 0x9d, 0xa8, 0x13, 0x38, 0x38, 0xa2, 0x72, 0x50, 0xe8, 0x2e, + 0xe2, 0xdd, 0xa8, 0x8d, 0x17, 0x9d, 0x80, 0x30, 0x22, 0xb0, 0x22, 0x2d, 0x14, 0x09, 0xae, 0xb8, + 0xd5, 0xe8, 0x23, 0x91, 0xd9, 0x88, 0xa2, 0x4e, 0x80, 0x52, 0xe4, 0xa0, 0x90, 0x21, 0xab, 0x0b, + 0x01, 0x55, 0xed, 0xb8, 0x89, 0x7c, 0x1e, 0x3a, 0x01, 0x0f, 0xb8, 0xa3, 0x09, 0x9a, 0xf1, 0x6b, + 0xfd, 0xa7, 0x7f, 0xf4, 0x57, 0x9f, 0xb8, 0x5a, 0x37, 0x2d, 0xe1, 0x88, 0x3a, 0x3e, 0x17, 0xc4, + 0xe9, 0x8e, 0x88, 0x57, 0x97, 0xf3, 0x3d, 0x21, 0xf6, 0xdb, 0x94, 0x11, 0xd1, 0xcb, 0x7a, 0x77, + 0x04, 0x91, 0x3c, 0x16, 0x3e, 0xf9, 0x27, 0x94, 0x3e, 0x31, 0x3e, 0x4b, 0xcb, 0xf9, 0x1b, 0x4a, + 0xc4, 0x4c, 0xd1, 0x70, 0x54, 0xe6, 0xee, 0x45, 0x00, 0xe9, 0xb7, 0x49, 0x88, 0x4f, 0xe3, 0xea, + 0xef, 0x8b, 0x70, 0xf6, 0x3e, 0x67, 0x0a, 0xa7, 0x88, 0x8d, 0xbe, 0x8b, 0xd6, 0x3c, 0x2c, 0x31, + 0x1c, 0x92, 0x39, 0x30, 0x0f, 0x1a, 0xd3, 0xee, 0xb5, 0xfd, 0xa3, 0x5a, 0x21, 0x39, 0xaa, 0x95, + 0x36, 0x71, 0x48, 0x3c, 0xbd, 0x62, 0x25, 0x00, 0x96, 0x63, 0x89, 0x03, 0x32, 0x37, 0x31, 0x5f, + 0x6c, 0x54, 0x96, 0xd6, 0xd1, 0xb8, 0x93, 0x41, 0xa7, 
0xd5, 0xd0, 0x93, 0x94, 0x67, 0x9d, 0x29, + 0xd1, 0x73, 0x3f, 0x00, 0xa3, 0x55, 0xd6, 0xc5, 0x5f, 0x47, 0xb5, 0xda, 0xe8, 0x60, 0x90, 0x67, + 0xbc, 0x7e, 0x48, 0xa5, 0x7a, 0x77, 0x7c, 0xee, 0x96, 0xb4, 0xe5, 0x8f, 0xc7, 0xb5, 0x85, 0x71, + 0x46, 0x87, 0xb6, 0x62, 0xcc, 0x14, 0x55, 0x3d, 0xaf, 0x7f, 0xb4, 0x6a, 0x1b, 0xc2, 0xbc, 0x37, + 0x6b, 0x16, 0x16, 0x3b, 0xa4, 0xd7, 0xf7, 0xc4, 0x4b, 0x3f, 0xad, 0x35, 0x58, 0xee, 0xe2, 0xdd, + 0x38, 0xf5, 0x00, 0x34, 0x2a, 0x4b, 0x28, 0xf3, 0x60, 0x58, 0x25, 0x33, 0x02, 0x9d, 0xa1, 0xa2, + 0xc1, 0xab, 0x13, 0x2b, 0xa0, 0xfe, 0xb3, 0x04, 0x2b, 0x9b, 0xbc, 0x45, 0xb2, 0x01, 0xbc, 0x82, + 0x53, 0x69, 0x32, 0x5a, 0x58, 0x61, 0x2d, 0x58, 0x59, 0xba, 0x7d, 0x1e, 0xb9, 0x76, 0x19, 0xa3, + 0xee, 0x22, 0x7a, 0xd4, 0x7c, 0x43, 0x7c, 0xb5, 0x41, 0x14, 0x76, 0x2d, 0x63, 0x25, 0xcc, 0x6b, + 0xde, 0x80, 0xd5, 0x7a, 0x01, 0xa7, 0xd3, 0x58, 0x48, 0x85, 0xc3, 0xc8, 0xf4, 0x7f, 0x6b, 0x3c, + 0x89, 0x6d, 0x1a, 0x12, 0xf7, 0xa6, 0x21, 0x9f, 0xde, 0xce, 0x48, 0xbc, 0x9c, 0xcf, 0x7a, 0x0a, + 0x27, 0xf7, 0x28, 0x6b, 0xf1, 0xbd, 0xb9, 0xe2, 0xc5, 0xce, 0xe4, 0xcc, 0x6b, 0xb1, 0xc0, 0x8a, + 0x72, 0xe6, 0xce, 0x18, 0xf6, 0xc9, 0x67, 0x9a, 0xc5, 0x33, 0x6c, 0xd6, 0xb7, 0x41, 0xea, 0x4a, + 0x3a, 0x75, 0xf7, 0xc6, 0x4f, 0xdd, 0x90, 0xbb, 0x57, 0x81, 0x03, 0xf5, 0xaf, 0x00, 0xde, 0x18, + 0xb2, 0x24, 0x3d, 0x98, 0xf5, 0x72, 0x24, 0x74, 0x63, 0xce, 0x2d, 0x45, 0xeb, 0xc8, 0xcd, 0x1a, + 0x33, 0xa7, 0xb2, 0xca, 0x50, 0xe0, 0x76, 0x60, 0x99, 0x2a, 0x12, 0x4a, 0xf3, 0x60, 0xdc, 0xb9, + 0xd4, 0xe8, 0xdc, 0xeb, 0xd9, 0xb8, 0x1e, 0xa4, 0x5c, 0x5e, 0x9f, 0xb2, 0xfe, 0xb9, 0x08, 0xe1, + 0x63, 0xde, 0xba, 0xba, 0x3d, 0xe7, 0xde, 0x1e, 0x06, 0xa1, 0x9f, 0xbd, 0xbd, 0xd2, 0xdc, 0xa0, + 0xd5, 0xcb, 0xbf, 0xdb, 0xb9, 0x45, 0x83, 0x15, 0xe9, 0x0d, 0x29, 0xd4, 0xbf, 0x00, 0x38, 0x93, + 0x4f, 0xe5, 0x3f, 0x44, 0xec, 0xf9, 0x9f, 0x11, 0x5b, 0x1e, 0xff, 0x6c, 0x79, 0x9b, 0x67, 0x27, + 0xcc, 0x45, 0xfb, 0x27, 0x76, 0xe1, 0xe0, 0xc4, 0x2e, 0x1c, 0x9e, 0xd8, 0x85, 0xb7, 0x89, 0x0d, + 0xf6, 0x13, 0x1b, 0x1c, 0x24, 0x36, 0x38, 0x4c, 0x6c, 0xf0, 0x3d, 0xb1, 0xc1, 0xa7, 0x1f, 0x76, + 0x61, 0x67, 0x2a, 0x23, 0xfc, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xa6, 0xe6, 0x48, 0x8b, 0xfc, 0x08, + 0x00, 0x00, +} + +func (m *ContainerMetrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerMetrics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Usage) > 0 { + keysForUsage := make([]string, 0, len(m.Usage)) + for k := range m.Usage { + keysForUsage = append(keysForUsage, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) + for iNdEx := len(keysForUsage) - 1; iNdEx >= 0; iNdEx-- { + v := m.Usage[k8s_io_api_core_v1.ResourceName(keysForUsage[iNdEx])] + baseI := i + { + size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForUsage[iNdEx]) + copy(dAtA[i:], keysForUsage[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUsage[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + 
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NodeMetrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeMetrics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Usage) > 0 { + keysForUsage := make([]string, 0, len(m.Usage)) + for k := range m.Usage { + keysForUsage = append(keysForUsage, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) + for iNdEx := len(keysForUsage) - 1; iNdEx >= 0; iNdEx-- { + v := m.Usage[k8s_io_api_core_v1.ResourceName(keysForUsage[iNdEx])] + baseI := i + { + size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForUsage[iNdEx]) + copy(dAtA[i:], keysForUsage[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUsage[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.Window.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NodeMetricsList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeMetricsList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeMetricsList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodMetrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodMetrics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Containers) > 0 { + for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.Window.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodMetricsList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodMetricsList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodMetricsList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ContainerMetrics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Usage) > 0 { + for k, v := range m.Usage { + _ = k + _ = v + l = ((*resource.Quantity)(&v)).Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *NodeMetrics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Timestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Window.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Usage) > 0 { + for k, v := range m.Usage { + _ = k + _ = v + l = ((*resource.Quantity)(&v)).Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *NodeMetricsList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodMetrics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Timestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Window.Size() + n += 
1 + l + sovGenerated(uint64(l)) + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodMetricsList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ContainerMetrics) String() string { + if this == nil { + return "nil" + } + keysForUsage := make([]string, 0, len(this.Usage)) + for k := range this.Usage { + keysForUsage = append(keysForUsage, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) + mapStringForUsage := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForUsage { + mapStringForUsage += fmt.Sprintf("%v: %v,", k, this.Usage[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForUsage += "}" + s := strings.Join([]string{`&ContainerMetrics{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Usage:` + mapStringForUsage + `,`, + `}`, + }, "") + return s +} +func (this *NodeMetrics) String() string { + if this == nil { + return "nil" + } + keysForUsage := make([]string, 0, len(this.Usage)) + for k := range this.Usage { + keysForUsage = append(keysForUsage, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) + mapStringForUsage := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForUsage { + mapStringForUsage += fmt.Sprintf("%v: %v,", k, this.Usage[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForUsage += "}" + s := strings.Join([]string{`&NodeMetrics{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Window:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Window), "Duration", "v1.Duration", 1), `&`, ``, 1) + `,`, + `Usage:` + mapStringForUsage + `,`, + `}`, + }, "") + return s +} +func (this *NodeMetricsList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]NodeMetrics{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NodeMetrics", "NodeMetrics", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&NodeMetricsList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodMetrics) String() string { + if this == nil { + return "nil" + } + repeatedStringForContainers := "[]ContainerMetrics{" + for _, f := range this.Containers { + repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "ContainerMetrics", "ContainerMetrics", 1), `&`, ``, 1) + "," + } + repeatedStringForContainers += "}" + s := strings.Join([]string{`&PodMetrics{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Time", "v1.Time", 1), 
`&`, ``, 1) + `,`, + `Window:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Window), "Duration", "v1.Duration", 1), `&`, ``, 1) + `,`, + `Containers:` + repeatedStringForContainers + `,`, + `}`, + }, "") + return s +} +func (this *PodMetricsList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PodMetrics{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodMetrics", "PodMetrics", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodMetricsList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ContainerMetrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Usage == nil { + m.Usage = make(k8s_io_api_core_v1.ResourceList) + } + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Usage[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeMetrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Window", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Window.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Usage == nil { + m.Usage = make(k8s_io_api_core_v1.ResourceList) + } + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := 
mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Usage[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeMetricsList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeMetricsList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeMetricsList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, NodeMetrics{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodMetrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Window", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Window.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Containers = append(m.Containers, ContainerMetrics{}) + if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *PodMetricsList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodMetricsList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodMetricsList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodMetrics{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 
0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto new file mode 100644 index 0000000000..1810fb0386 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto @@ -0,0 +1,95 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.metrics.pkg.apis.metrics.v1alpha1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// ContainerMetrics sets resource usage metrics of a container. +message ContainerMetrics { + // Container name corresponding to the one from pod.spec.containers. + optional string name = 1; + + // The memory usage is the memory working set. + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> usage = 2; +} + +// NodeMetrics sets resource usage metrics of a node. +message NodeMetrics { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The following fields define time interval from which metrics were + // collected from the interval [Timestamp-Window, Timestamp]. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timestamp = 2; + + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration window = 3; + + // The memory usage is the memory working set. + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> usage = 4; +} + +// NodeMetricsList is a list of NodeMetrics. +message NodeMetricsList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of node metrics. + repeated NodeMetrics items = 2; +} + +// PodMetrics sets resource usage metrics of a pod. +message PodMetrics { + // Standard object's metadata.
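
For context, the usage maps declared in this proto surface in Go as core/v1 ResourceList values (map[ResourceName]resource.Quantity). A minimal sketch of reading CPU and memory out of such a map, not part of this patch; the quantities are illustrative only:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Shaped like ContainerMetrics.Usage / NodeMetrics.Usage; values are made up.
	usage := corev1.ResourceList{
		corev1.ResourceCPU:    resource.MustParse("250m"),
		corev1.ResourceMemory: resource.MustParse("512Mi"),
	}
	cpu := usage[corev1.ResourceCPU]
	mem := usage[corev1.ResourceMemory]
	// MilliValue and Value convert the quantities to millicores and bytes respectively.
	fmt.Printf("cpu=%dm memory=%d bytes\n", cpu.MilliValue(), mem.Value())
}
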
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The following fields define time interval from which metrics were + // collected from the interval [Timestamp-Window, Timestamp]. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timestamp = 2; + + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration window = 3; + + // Metrics for all containers are collected within the same time window. + repeated ContainerMetrics containers = 4; +} + +// PodMetricsList is a list of PodMetrics. +message PodMetricsList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of pod metrics. + repeated PodMetrics items = 2; +} + diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/register.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/register.go new file mode 100644 index 0000000000..3e5359a8ee --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "metrics.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder points to a list of functions added to Scheme. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + // AddToScheme applies all the stored functions to the scheme. + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &NodeMetrics{}, + &NodeMetricsList{}, + &PodMetrics{}, + &PodMetricsList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/types.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/types.go new file mode 100644 index 0000000000..871a3b1777 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/types.go @@ -0,0 +1,101 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
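
The register.go above wires the four v1alpha1 kinds into a runtime.Scheme through AddToScheme. A minimal sketch of using it, not part of this patch:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	metricsv1alpha1 "k8s.io/metrics/pkg/apis/metrics/v1alpha1"
)

func main() {
	scheme := runtime.NewScheme()
	// Registers NodeMetrics(List) and PodMetrics(List) under metrics.k8s.io/v1alpha1.
	if err := metricsv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// Prints true once the kind is known to the scheme.
	fmt.Println(scheme.Recognizes(metricsv1alpha1.SchemeGroupVersion.WithKind("NodeMetrics")))
}
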
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +resourceName=nodes +// +genclient:readonly +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeMetrics sets resource usage metrics of a node. +type NodeMetrics struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The following fields define time interval from which metrics were + // collected from the interval [Timestamp-Window, Timestamp]. + Timestamp metav1.Time `json:"timestamp" protobuf:"bytes,2,opt,name=timestamp"` + Window metav1.Duration `json:"window" protobuf:"bytes,3,opt,name=window"` + + // The memory usage is the memory working set. + Usage v1.ResourceList `json:"usage" protobuf:"bytes,4,rep,name=usage,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeMetricsList is a list of NodeMetrics. +type NodeMetricsList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of node metrics. + Items []NodeMetrics `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +resourceName=pods +// +genclient:readonly +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodMetrics sets resource usage metrics of a pod. +type PodMetrics struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The following fields define time interval from which metrics were + // collected from the interval [Timestamp-Window, Timestamp]. + Timestamp metav1.Time `json:"timestamp" protobuf:"bytes,2,opt,name=timestamp"` + Window metav1.Duration `json:"window" protobuf:"bytes,3,opt,name=window"` + + // Metrics for all containers are collected within the same time window. + Containers []ContainerMetrics `json:"containers" protobuf:"bytes,4,rep,name=containers"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodMetricsList is a list of PodMetrics. +type PodMetricsList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of pod metrics. 
+ Items []PodMetrics `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ContainerMetrics sets resource usage metrics of a container. +type ContainerMetrics struct { + // Container name corresponding to the one from pod.spec.containers. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // The memory usage is the memory working set. + Usage v1.ResourceList `json:"usage" protobuf:"bytes,2,rep,name=usage,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` +} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.conversion.go new file mode 100644 index 0000000000..f29d646594 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,209 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + unsafe "unsafe" + + v1 "k8s.io/api/core/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + metrics "k8s.io/metrics/pkg/apis/metrics" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
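
These typed objects are normally obtained through the versioned clientset that ships with k8s.io/metrics rather than built by hand. A hedged sketch, not part of this patch, assuming a reachable cluster with metrics-server installed and an illustrative kubeconfig path; it uses the v1beta1 client, which is the version metrics-server typically serves:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
	metricsclientset "k8s.io/metrics/pkg/client/clientset/versioned"
)

func main() {
	// The kubeconfig path is illustrative only.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	mc, err := metricsclientset.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Fails if the cluster does not expose the metrics.k8s.io API (no metrics-server).
	nodes, err := mc.MetricsV1beta1().NodeMetricses().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, n := range nodes.Items {
		cpu := n.Usage["cpu"]
		fmt.Printf("%s window=%v cpu=%dm\n", n.Name, n.Window.Duration, cpu.MilliValue())
	}
}
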
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*ContainerMetrics)(nil), (*metrics.ContainerMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ContainerMetrics_To_metrics_ContainerMetrics(a.(*ContainerMetrics), b.(*metrics.ContainerMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*metrics.ContainerMetrics)(nil), (*ContainerMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_metrics_ContainerMetrics_To_v1alpha1_ContainerMetrics(a.(*metrics.ContainerMetrics), b.(*ContainerMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NodeMetrics)(nil), (*metrics.NodeMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NodeMetrics_To_metrics_NodeMetrics(a.(*NodeMetrics), b.(*metrics.NodeMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*metrics.NodeMetrics)(nil), (*NodeMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_metrics_NodeMetrics_To_v1alpha1_NodeMetrics(a.(*metrics.NodeMetrics), b.(*NodeMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NodeMetricsList)(nil), (*metrics.NodeMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NodeMetricsList_To_metrics_NodeMetricsList(a.(*NodeMetricsList), b.(*metrics.NodeMetricsList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*metrics.NodeMetricsList)(nil), (*NodeMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_metrics_NodeMetricsList_To_v1alpha1_NodeMetricsList(a.(*metrics.NodeMetricsList), b.(*NodeMetricsList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*PodMetrics)(nil), (*metrics.PodMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_PodMetrics_To_metrics_PodMetrics(a.(*PodMetrics), b.(*metrics.PodMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*metrics.PodMetrics)(nil), (*PodMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_metrics_PodMetrics_To_v1alpha1_PodMetrics(a.(*metrics.PodMetrics), b.(*PodMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*PodMetricsList)(nil), (*metrics.PodMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_PodMetricsList_To_metrics_PodMetricsList(a.(*PodMetricsList), b.(*metrics.PodMetricsList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*metrics.PodMetricsList)(nil), (*PodMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_metrics_PodMetricsList_To_v1alpha1_PodMetricsList(a.(*metrics.PodMetricsList), b.(*PodMetricsList), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1alpha1_ContainerMetrics_To_metrics_ContainerMetrics(in *ContainerMetrics, out *metrics.ContainerMetrics, s conversion.Scope) error { + out.Name = in.Name + out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) + return nil +} + +// Convert_v1alpha1_ContainerMetrics_To_metrics_ContainerMetrics is an autogenerated conversion function. 
+func Convert_v1alpha1_ContainerMetrics_To_metrics_ContainerMetrics(in *ContainerMetrics, out *metrics.ContainerMetrics, s conversion.Scope) error { + return autoConvert_v1alpha1_ContainerMetrics_To_metrics_ContainerMetrics(in, out, s) +} + +func autoConvert_metrics_ContainerMetrics_To_v1alpha1_ContainerMetrics(in *metrics.ContainerMetrics, out *ContainerMetrics, s conversion.Scope) error { + out.Name = in.Name + out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) + return nil +} + +// Convert_metrics_ContainerMetrics_To_v1alpha1_ContainerMetrics is an autogenerated conversion function. +func Convert_metrics_ContainerMetrics_To_v1alpha1_ContainerMetrics(in *metrics.ContainerMetrics, out *ContainerMetrics, s conversion.Scope) error { + return autoConvert_metrics_ContainerMetrics_To_v1alpha1_ContainerMetrics(in, out, s) +} + +func autoConvert_v1alpha1_NodeMetrics_To_metrics_NodeMetrics(in *NodeMetrics, out *metrics.NodeMetrics, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Timestamp = in.Timestamp + out.Window = in.Window + out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) + return nil +} + +// Convert_v1alpha1_NodeMetrics_To_metrics_NodeMetrics is an autogenerated conversion function. +func Convert_v1alpha1_NodeMetrics_To_metrics_NodeMetrics(in *NodeMetrics, out *metrics.NodeMetrics, s conversion.Scope) error { + return autoConvert_v1alpha1_NodeMetrics_To_metrics_NodeMetrics(in, out, s) +} + +func autoConvert_metrics_NodeMetrics_To_v1alpha1_NodeMetrics(in *metrics.NodeMetrics, out *NodeMetrics, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Timestamp = in.Timestamp + out.Window = in.Window + out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) + return nil +} + +// Convert_metrics_NodeMetrics_To_v1alpha1_NodeMetrics is an autogenerated conversion function. +func Convert_metrics_NodeMetrics_To_v1alpha1_NodeMetrics(in *metrics.NodeMetrics, out *NodeMetrics, s conversion.Scope) error { + return autoConvert_metrics_NodeMetrics_To_v1alpha1_NodeMetrics(in, out, s) +} + +func autoConvert_v1alpha1_NodeMetricsList_To_metrics_NodeMetricsList(in *NodeMetricsList, out *metrics.NodeMetricsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]metrics.NodeMetrics)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_NodeMetricsList_To_metrics_NodeMetricsList is an autogenerated conversion function. +func Convert_v1alpha1_NodeMetricsList_To_metrics_NodeMetricsList(in *NodeMetricsList, out *metrics.NodeMetricsList, s conversion.Scope) error { + return autoConvert_v1alpha1_NodeMetricsList_To_metrics_NodeMetricsList(in, out, s) +} + +func autoConvert_metrics_NodeMetricsList_To_v1alpha1_NodeMetricsList(in *metrics.NodeMetricsList, out *NodeMetricsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NodeMetrics)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_metrics_NodeMetricsList_To_v1alpha1_NodeMetricsList is an autogenerated conversion function. 
+func Convert_metrics_NodeMetricsList_To_v1alpha1_NodeMetricsList(in *metrics.NodeMetricsList, out *NodeMetricsList, s conversion.Scope) error { + return autoConvert_metrics_NodeMetricsList_To_v1alpha1_NodeMetricsList(in, out, s) +} + +func autoConvert_v1alpha1_PodMetrics_To_metrics_PodMetrics(in *PodMetrics, out *metrics.PodMetrics, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Timestamp = in.Timestamp + out.Window = in.Window + out.Containers = *(*[]metrics.ContainerMetrics)(unsafe.Pointer(&in.Containers)) + return nil +} + +// Convert_v1alpha1_PodMetrics_To_metrics_PodMetrics is an autogenerated conversion function. +func Convert_v1alpha1_PodMetrics_To_metrics_PodMetrics(in *PodMetrics, out *metrics.PodMetrics, s conversion.Scope) error { + return autoConvert_v1alpha1_PodMetrics_To_metrics_PodMetrics(in, out, s) +} + +func autoConvert_metrics_PodMetrics_To_v1alpha1_PodMetrics(in *metrics.PodMetrics, out *PodMetrics, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Timestamp = in.Timestamp + out.Window = in.Window + out.Containers = *(*[]ContainerMetrics)(unsafe.Pointer(&in.Containers)) + return nil +} + +// Convert_metrics_PodMetrics_To_v1alpha1_PodMetrics is an autogenerated conversion function. +func Convert_metrics_PodMetrics_To_v1alpha1_PodMetrics(in *metrics.PodMetrics, out *PodMetrics, s conversion.Scope) error { + return autoConvert_metrics_PodMetrics_To_v1alpha1_PodMetrics(in, out, s) +} + +func autoConvert_v1alpha1_PodMetricsList_To_metrics_PodMetricsList(in *PodMetricsList, out *metrics.PodMetricsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]metrics.PodMetrics)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_PodMetricsList_To_metrics_PodMetricsList is an autogenerated conversion function. +func Convert_v1alpha1_PodMetricsList_To_metrics_PodMetricsList(in *PodMetricsList, out *metrics.PodMetricsList, s conversion.Scope) error { + return autoConvert_v1alpha1_PodMetricsList_To_metrics_PodMetricsList(in, out, s) +} + +func autoConvert_metrics_PodMetricsList_To_v1alpha1_PodMetricsList(in *metrics.PodMetricsList, out *PodMetricsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]PodMetrics)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_metrics_PodMetricsList_To_v1alpha1_PodMetricsList is an autogenerated conversion function. +func Convert_metrics_PodMetricsList_To_v1alpha1_PodMetricsList(in *metrics.PodMetricsList, out *PodMetricsList, s conversion.Scope) error { + return autoConvert_metrics_PodMetricsList_To_v1alpha1_PodMetricsList(in, out, s) +} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..9cd8619ecc --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,186 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
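
The generated converters above can be called directly when moving between the versioned and internal representations; none of the autoConvert bodies shown touch the conversion.Scope argument, so nil is acceptable for these types. A minimal sketch, not part of this patch:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metrics "k8s.io/metrics/pkg/apis/metrics"
	metricsv1alpha1 "k8s.io/metrics/pkg/apis/metrics/v1alpha1"
)

func main() {
	in := metricsv1alpha1.ContainerMetrics{
		Name:  "app",
		Usage: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")},
	}
	var out metrics.ContainerMetrics
	// The generated function ignores the scope for these types, hence nil.
	if err := metricsv1alpha1.Convert_v1alpha1_ContainerMetrics_To_metrics_ContainerMetrics(&in, &out, nil); err != nil {
		panic(err)
	}
	fmt.Println(out.Name)
}
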
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerMetrics) DeepCopyInto(out *ContainerMetrics) { + *out = *in + if in.Usage != nil { + in, out := &in.Usage, &out.Usage + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerMetrics. +func (in *ContainerMetrics) DeepCopy() *ContainerMetrics { + if in == nil { + return nil + } + out := new(ContainerMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeMetrics) DeepCopyInto(out *NodeMetrics) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Timestamp.DeepCopyInto(&out.Timestamp) + out.Window = in.Window + if in.Usage != nil { + in, out := &in.Usage, &out.Usage + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeMetrics. +func (in *NodeMetrics) DeepCopy() *NodeMetrics { + if in == nil { + return nil + } + out := new(NodeMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeMetrics) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeMetricsList) DeepCopyInto(out *NodeMetricsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NodeMetrics, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeMetricsList. +func (in *NodeMetricsList) DeepCopy() *NodeMetricsList { + if in == nil { + return nil + } + out := new(NodeMetricsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeMetricsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodMetrics) DeepCopyInto(out *PodMetrics) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Timestamp.DeepCopyInto(&out.Timestamp) + out.Window = in.Window + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]ContainerMetrics, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetrics. 
+func (in *PodMetrics) DeepCopy() *PodMetrics { + if in == nil { + return nil + } + out := new(PodMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodMetrics) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodMetricsList) DeepCopyInto(out *PodMetricsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodMetrics, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetricsList. +func (in *PodMetricsList) DeepCopy() *PodMetricsList { + if in == nil { + return nil + } + out := new(PodMetricsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodMetricsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/doc.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/doc.go new file mode 100644 index 0000000000..10f5ab9fa5 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:conversion-gen=k8s.io/metrics/pkg/apis/metrics +// +k8s:openapi-gen=true +// +groupName=metrics.k8s.io + +// Package v1beta1 is the v1beta1 version of the metrics API. +package v1beta1 // import "k8s.io/metrics/pkg/apis/metrics/v1beta1" diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.pb.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.pb.go new file mode 100644 index 0000000000..e7002f56bb --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.pb.go @@ -0,0 +1,1758 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
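
The deepcopy helpers above matter because the Usage maps and Containers slices are reference types, so a shallow copy would alias the original. A minimal sketch, not part of this patch, showing that DeepCopy detaches the nested data:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metricsv1alpha1 "k8s.io/metrics/pkg/apis/metrics/v1alpha1"
)

func main() {
	orig := &metricsv1alpha1.PodMetrics{
		Containers: []metricsv1alpha1.ContainerMetrics{{
			Name:  "app",
			Usage: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")},
		}},
	}
	copied := orig.DeepCopy()
	// Only the copy changes; the original keeps 100m.
	copied.Containers[0].Usage[corev1.ResourceCPU] = resource.MustParse("200m")
	origCPU := orig.Containers[0].Usage[corev1.ResourceCPU]
	copiedCPU := copied.Containers[0].Usage[corev1.ResourceCPU]
	fmt.Println(origCPU.String(), copiedCPU.String()) // 100m 200m
}
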
+// source: k8s.io/kubernetes/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + + k8s_io_api_core_v1 "k8s.io/api/core/v1" + k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" + resource "k8s.io/apimachinery/pkg/api/resource" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ContainerMetrics) Reset() { *m = ContainerMetrics{} } +func (*ContainerMetrics) ProtoMessage() {} +func (*ContainerMetrics) Descriptor() ([]byte, []int) { + return fileDescriptor_3e7a045767f4b09f, []int{0} +} +func (m *ContainerMetrics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerMetrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerMetrics.Merge(m, src) +} +func (m *ContainerMetrics) XXX_Size() int { + return m.Size() +} +func (m *ContainerMetrics) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerMetrics.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerMetrics proto.InternalMessageInfo + +func (m *NodeMetrics) Reset() { *m = NodeMetrics{} } +func (*NodeMetrics) ProtoMessage() {} +func (*NodeMetrics) Descriptor() ([]byte, []int) { + return fileDescriptor_3e7a045767f4b09f, []int{1} +} +func (m *NodeMetrics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeMetrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeMetrics.Merge(m, src) +} +func (m *NodeMetrics) XXX_Size() int { + return m.Size() +} +func (m *NodeMetrics) XXX_DiscardUnknown() { + xxx_messageInfo_NodeMetrics.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeMetrics proto.InternalMessageInfo + +func (m *NodeMetricsList) Reset() { *m = NodeMetricsList{} } +func (*NodeMetricsList) ProtoMessage() {} +func (*NodeMetricsList) Descriptor() ([]byte, []int) { + return fileDescriptor_3e7a045767f4b09f, []int{2} +} +func (m *NodeMetricsList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeMetricsList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeMetricsList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeMetricsList.Merge(m, src) +} +func (m *NodeMetricsList) XXX_Size() int { + return m.Size() +} +func (m *NodeMetricsList) XXX_DiscardUnknown() { + xxx_messageInfo_NodeMetricsList.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeMetricsList proto.InternalMessageInfo + +func (m *PodMetrics) Reset() { *m = 
PodMetrics{} } +func (*PodMetrics) ProtoMessage() {} +func (*PodMetrics) Descriptor() ([]byte, []int) { + return fileDescriptor_3e7a045767f4b09f, []int{3} +} +func (m *PodMetrics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodMetrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodMetrics.Merge(m, src) +} +func (m *PodMetrics) XXX_Size() int { + return m.Size() +} +func (m *PodMetrics) XXX_DiscardUnknown() { + xxx_messageInfo_PodMetrics.DiscardUnknown(m) +} + +var xxx_messageInfo_PodMetrics proto.InternalMessageInfo + +func (m *PodMetricsList) Reset() { *m = PodMetricsList{} } +func (*PodMetricsList) ProtoMessage() {} +func (*PodMetricsList) Descriptor() ([]byte, []int) { + return fileDescriptor_3e7a045767f4b09f, []int{4} +} +func (m *PodMetricsList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodMetricsList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodMetricsList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodMetricsList.Merge(m, src) +} +func (m *PodMetricsList) XXX_Size() int { + return m.Size() +} +func (m *PodMetricsList) XXX_DiscardUnknown() { + xxx_messageInfo_PodMetricsList.DiscardUnknown(m) +} + +var xxx_messageInfo_PodMetricsList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ContainerMetrics)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.ContainerMetrics") + proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.ContainerMetrics.UsageEntry") + proto.RegisterType((*NodeMetrics)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.NodeMetrics") + proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.NodeMetrics.UsageEntry") + proto.RegisterType((*NodeMetricsList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.NodeMetricsList") + proto.RegisterType((*PodMetrics)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.PodMetrics") + proto.RegisterType((*PodMetricsList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.PodMetricsList") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto", fileDescriptor_3e7a045767f4b09f) +} + +var fileDescriptor_3e7a045767f4b09f = []byte{ + // 659 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x54, 0xbf, 0x6f, 0x13, 0x3f, + 0x1c, 0x8d, 0x9b, 0xa4, 0xdf, 0xd6, 0xf9, 0x52, 0xca, 0x4d, 0x55, 0x86, 0x4b, 0x95, 0x85, 0x0a, + 0xa9, 0x36, 0x2d, 0x15, 0x2a, 0x2c, 0x48, 0x47, 0x19, 0x90, 0x68, 0x29, 0xa7, 0xf2, 0x9b, 0x01, + 0xe7, 0x62, 0x2e, 0x26, 0xdc, 0x39, 0xb2, 0x7d, 0xa9, 0xb2, 0xa1, 0x8a, 0x89, 0x09, 0xf1, 0x57, + 0x45, 0x4c, 0x1d, 0x3b, 0xa0, 0x96, 0x84, 0x99, 0x7f, 0x80, 0x09, 0x9d, 0xcf, 0x97, 0x0b, 0x4d, + 0x69, 0x8f, 0x0e, 0x4c, 0xdd, 0xee, 0x3e, 0xf6, 0x7b, 0xef, 0xe3, 0xf7, 0x79, 0x36, 0xdc, 0x6e, + 0xaf, 0x4b, 0xc4, 0x38, 0x6e, 0x47, 0x0d, 0x2a, 0x42, 0xaa, 0xa8, 0xc4, 0x5d, 0x1a, 0x36, 0xb9, + 0xc0, 0x66, 0x21, 0xa0, 0x4a, 0x30, 0x4f, 0xe2, 0x4e, 0xdb, 0xc7, 0xa4, 0xc3, 0xe4, 0xa8, 0xd0, + 0x5d, 0x69, 0x50, 0x45, 0x56, 0xb0, 0x4f, 0x43, 0x2a, 0x88, 0xa2, 0x4d, 0xd4, 0x11, 0x5c, 0x71, + 0xeb, 0x6a, 0x02, 0x44, 0x66, 
0x1f, 0xea, 0xb4, 0x7d, 0x14, 0x03, 0x47, 0x05, 0x03, 0xac, 0x2e, + 0xfb, 0x4c, 0xb5, 0xa2, 0x06, 0xf2, 0x78, 0x80, 0x7d, 0xee, 0x73, 0xac, 0xf1, 0x8d, 0xe8, 0x8d, + 0xfe, 0xd3, 0x3f, 0xfa, 0x2b, 0xe1, 0xad, 0xd6, 0x4d, 0x43, 0xa4, 0xc3, 0xb0, 0xc7, 0x05, 0xc5, + 0xdd, 0x09, 0xed, 0xea, 0x5a, 0xb6, 0x27, 0x20, 0x5e, 0x8b, 0x85, 0x54, 0xf4, 0xd2, 0xce, 0xb1, + 0xa0, 0x92, 0x47, 0xc2, 0xa3, 0x7f, 0x85, 0xd2, 0xe7, 0x25, 0x27, 0x69, 0xe1, 0x3f, 0xa1, 0x44, + 0x14, 0x2a, 0x16, 0x4c, 0xca, 0xdc, 0x3c, 0x0b, 0x20, 0xbd, 0x16, 0x0d, 0xc8, 0x71, 0x5c, 0x7d, + 0xaf, 0x08, 0xe7, 0xef, 0xf2, 0x50, 0x91, 0x18, 0xb1, 0x99, 0x98, 0x68, 0x2d, 0xc2, 0x52, 0x48, + 0x02, 0xba, 0x00, 0x16, 0xc1, 0xd2, 0xac, 0xf3, 0x7f, 0xff, 0xb0, 0x56, 0x18, 0x1e, 0xd6, 0x4a, + 0x5b, 0x24, 0xa0, 0xae, 0x5e, 0xb1, 0x06, 0x00, 0x96, 0x23, 0x49, 0x7c, 0xba, 0x30, 0xb5, 0x58, + 0x5c, 0xaa, 0xac, 0x6e, 0xa0, 0x9c, 0x83, 0x41, 0xc7, 0xc5, 0xd0, 0xe3, 0x98, 0xe6, 0x5e, 0xa8, + 0x44, 0xcf, 0xf9, 0x00, 0x8c, 0x54, 0x59, 0x17, 0x7f, 0x1e, 0xd6, 0x6a, 0x93, 0x73, 0x41, 0xae, + 0xb1, 0xfa, 0x01, 0x93, 0x6a, 0xef, 0xe8, 0xd4, 0x2d, 0x71, 0xc7, 0x1f, 0x8f, 0x6a, 0xcb, 0x79, + 0x26, 0x87, 0x1e, 0x45, 0x24, 0x54, 0x4c, 0xf5, 0xdc, 0xe4, 0x64, 0xd5, 0x16, 0x84, 0x59, 0x6f, + 0xd6, 0x3c, 0x2c, 0xb6, 0x69, 0x2f, 0xb1, 0xc4, 0x8d, 0x3f, 0xad, 0x0d, 0x58, 0xee, 0x92, 0x77, + 0x51, 0x6c, 0x01, 0x58, 0xaa, 0xac, 0xa2, 0xd4, 0x82, 0x71, 0x95, 0xd4, 0x07, 0x74, 0x82, 0x8a, + 0x06, 0xdf, 0x9e, 0x5a, 0x07, 0xf5, 0x1f, 0x25, 0x58, 0xd9, 0xe2, 0x4d, 0x9a, 0xfa, 0xff, 0x1a, + 0xce, 0xc4, 0xc1, 0x68, 0x12, 0x45, 0xb4, 0x60, 0x65, 0xf5, 0xfa, 0x69, 0xe4, 0xda, 0x64, 0x82, + 0xba, 0x2b, 0xe8, 0x61, 0xe3, 0x2d, 0xf5, 0xd4, 0x26, 0x55, 0xc4, 0xb1, 0x8c, 0x95, 0x30, 0xab, + 0xb9, 0x23, 0x56, 0xeb, 0x25, 0x9c, 0x8d, 0x53, 0x21, 0x15, 0x09, 0x3a, 0xa6, 0xff, 0x6b, 0xf9, + 0x24, 0x76, 0x58, 0x40, 0x9d, 0x2b, 0x86, 0x7c, 0x76, 0x27, 0x25, 0x71, 0x33, 0x3e, 0xeb, 0x09, + 0x9c, 0xde, 0x65, 0x61, 0x93, 0xef, 0x2e, 0x14, 0xcf, 0x76, 0x26, 0x63, 0xde, 0x88, 0x04, 0x51, + 0x8c, 0x87, 0xce, 0x9c, 0x61, 0x9f, 0x7e, 0xaa, 0x59, 0x5c, 0xc3, 0x66, 0x7d, 0x1d, 0x85, 0xae, + 0xa4, 0x43, 0x77, 0x27, 0x77, 0xe8, 0xc6, 0xcc, 0xbd, 0xc8, 0x1b, 0xa8, 0x7f, 0x01, 0xf0, 0xf2, + 0x98, 0x25, 0xf1, 0xc1, 0xac, 0x57, 0x13, 0x99, 0xcb, 0x39, 0xb6, 0x18, 0xad, 0x13, 0x37, 0x6f, + 0xcc, 0x9c, 0x49, 0x2b, 0x63, 0x79, 0x7b, 0x0e, 0xcb, 0x4c, 0xd1, 0x40, 0x9a, 0xe7, 0x62, 0xed, + 0x3c, 0x93, 0x73, 0x2e, 0xa5, 0xd3, 0xba, 0x1f, 0x53, 0xb9, 0x09, 0x63, 0xfd, 0x73, 0x11, 0xc2, + 0x6d, 0xde, 0xbc, 0xb8, 0x3b, 0xa7, 0xde, 0x9d, 0x00, 0x42, 0x2f, 0x7d, 0x79, 0xa5, 0xb9, 0x3f, + 0xb7, 0xce, 0xfd, 0x68, 0x67, 0x0e, 0x8d, 0x56, 0xa4, 0x3b, 0x26, 0x50, 0xef, 0x03, 0x38, 0x97, + 0x0d, 0xe5, 0x1f, 0x04, 0xec, 0xd9, 0xef, 0x01, 0xbb, 0x91, 0xfb, 0x68, 0x59, 0x97, 0x27, 0xe7, + 0xcb, 0x59, 0xee, 0x0f, 0xec, 0xc2, 0xfe, 0xc0, 0x2e, 0x1c, 0x0c, 0xec, 0xc2, 0xfb, 0xa1, 0x0d, + 0xfa, 0x43, 0x1b, 0xec, 0x0f, 0x6d, 0x70, 0x30, 0xb4, 0xc1, 0xb7, 0xa1, 0x0d, 0x3e, 0x7d, 0xb7, + 0x0b, 0x2f, 0xfe, 0x33, 0x7c, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xb2, 0xc3, 0x3b, 0x02, 0xf4, + 0x08, 0x00, 0x00, +} + +func (m *ContainerMetrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerMetrics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerMetrics) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Usage) > 0 { + keysForUsage := make([]string, 0, len(m.Usage)) + for k := range m.Usage { + keysForUsage = append(keysForUsage, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) + for iNdEx := len(keysForUsage) - 1; iNdEx >= 0; iNdEx-- { + v := m.Usage[k8s_io_api_core_v1.ResourceName(keysForUsage[iNdEx])] + baseI := i + { + size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForUsage[iNdEx]) + copy(dAtA[i:], keysForUsage[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUsage[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NodeMetrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeMetrics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Usage) > 0 { + keysForUsage := make([]string, 0, len(m.Usage)) + for k := range m.Usage { + keysForUsage = append(keysForUsage, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) + for iNdEx := len(keysForUsage) - 1; iNdEx >= 0; iNdEx-- { + v := m.Usage[k8s_io_api_core_v1.ResourceName(keysForUsage[iNdEx])] + baseI := i + { + size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForUsage[iNdEx]) + copy(dAtA[i:], keysForUsage[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUsage[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.Window.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NodeMetricsList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeMetricsList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeMetricsList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 
0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodMetrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodMetrics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Containers) > 0 { + for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.Window.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodMetricsList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodMetricsList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodMetricsList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ContainerMetrics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Usage) > 0 { + for k, v := range m.Usage { + _ = k + _ = v + l = ((*resource.Quantity)(&v)).Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *NodeMetrics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Timestamp.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + l = m.Window.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Usage) > 0 { + for k, v := range m.Usage { + _ = k + _ = v + l = ((*resource.Quantity)(&v)).Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *NodeMetricsList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodMetrics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Timestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Window.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodMetricsList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ContainerMetrics) String() string { + if this == nil { + return "nil" + } + keysForUsage := make([]string, 0, len(this.Usage)) + for k := range this.Usage { + keysForUsage = append(keysForUsage, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) + mapStringForUsage := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForUsage { + mapStringForUsage += fmt.Sprintf("%v: %v,", k, this.Usage[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForUsage += "}" + s := strings.Join([]string{`&ContainerMetrics{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Usage:` + mapStringForUsage + `,`, + `}`, + }, "") + return s +} +func (this *NodeMetrics) String() string { + if this == nil { + return "nil" + } + keysForUsage := make([]string, 0, len(this.Usage)) + for k := range this.Usage { + keysForUsage = append(keysForUsage, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) + mapStringForUsage := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForUsage { + mapStringForUsage += fmt.Sprintf("%v: %v,", k, this.Usage[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForUsage += "}" + s := strings.Join([]string{`&NodeMetrics{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Window:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Window), "Duration", "v1.Duration", 1), `&`, ``, 1) + `,`, + `Usage:` + mapStringForUsage + `,`, + `}`, + }, "") + return s +} +func (this *NodeMetricsList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]NodeMetrics{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NodeMetrics", "NodeMetrics", 1), `&`, ``, 1) + "," + } + 
repeatedStringForItems += "}" + s := strings.Join([]string{`&NodeMetricsList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodMetrics) String() string { + if this == nil { + return "nil" + } + repeatedStringForContainers := "[]ContainerMetrics{" + for _, f := range this.Containers { + repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "ContainerMetrics", "ContainerMetrics", 1), `&`, ``, 1) + "," + } + repeatedStringForContainers += "}" + s := strings.Join([]string{`&PodMetrics{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Window:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Window), "Duration", "v1.Duration", 1), `&`, ``, 1) + `,`, + `Containers:` + repeatedStringForContainers + `,`, + `}`, + }, "") + return s +} +func (this *PodMetricsList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PodMetrics{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodMetrics", "PodMetrics", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodMetricsList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ContainerMetrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Usage == nil { + m.Usage = make(k8s_io_api_core_v1.ResourceList) + } + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Usage[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeMetrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: NodeMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Window", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Window.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Usage == nil { + m.Usage = make(k8s_io_api_core_v1.ResourceList) + } + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := 
int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Usage[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeMetricsList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeMetricsList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeMetricsList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, NodeMetrics{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodMetrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Window", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Window.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) + } + var msglen int 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Containers = append(m.Containers, ContainerMetrics{}) + if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodMetricsList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodMetricsList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodMetricsList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodMetrics{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto new file mode 100644 index 0000000000..a72d2eb404 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto @@ -0,0 +1,95 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.metrics.pkg.apis.metrics.v1beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// ContainerMetrics sets resource usage metrics of a container. +message ContainerMetrics { + // Container name corresponding to the one from pod.spec.containers. + optional string name = 1; + + // The memory usage is the memory working set. + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> usage = 2; +} + +// NodeMetrics sets resource usage metrics of a node. +message NodeMetrics { + // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The following fields define time interval from which metrics were + // collected from the interval [Timestamp-Window, Timestamp]. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timestamp = 2; + + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration window = 3; + + // The memory usage is the memory working set. + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> usage = 4; +} + +// NodeMetricsList is a list of NodeMetrics. +message NodeMetricsList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of node metrics. + repeated NodeMetrics items = 2; +} + +// PodMetrics sets resource usage metrics of a pod. +message PodMetrics { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The following fields define time interval from which metrics were + // collected from the interval [Timestamp-Window, Timestamp]. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timestamp = 2; + + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration window = 3; + + // Metrics for all containers are collected within the same time window. + repeated ContainerMetrics containers = 4; +} + +// PodMetricsList is a list of PodMetrics. +message PodMetricsList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of pod metrics. + repeated PodMetrics items = 2; +} + diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/register.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/register.go new file mode 100644 index 0000000000..205d253c77 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "metrics.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder points to a list of functions added to Scheme.
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + // AddToScheme applies all the stored functions to the scheme. + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &NodeMetrics{}, + &NodeMetricsList{}, + &PodMetrics{}, + &PodMetricsList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/types.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/types.go new file mode 100644 index 0000000000..530797b5bf --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/types.go @@ -0,0 +1,101 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +resourceName=nodes +// +genclient:readonly +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeMetrics sets resource usage metrics of a node. +type NodeMetrics struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The following fields define time interval from which metrics were + // collected from the interval [Timestamp-Window, Timestamp]. + Timestamp metav1.Time `json:"timestamp" protobuf:"bytes,2,opt,name=timestamp"` + Window metav1.Duration `json:"window" protobuf:"bytes,3,opt,name=window"` + + // The memory usage is the memory working set. + Usage v1.ResourceList `json:"usage" protobuf:"bytes,4,rep,name=usage,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeMetricsList is a list of NodeMetrics. +type NodeMetricsList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of node metrics. + Items []NodeMetrics `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +resourceName=pods +// +genclient:readonly +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodMetrics sets resource usage metrics of a pod. +type PodMetrics struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The following fields define time interval from which metrics were + // collected from the interval [Timestamp-Window, Timestamp]. + Timestamp metav1.Time `json:"timestamp" protobuf:"bytes,2,opt,name=timestamp"` + Window metav1.Duration `json:"window" protobuf:"bytes,3,opt,name=window"` + + // Metrics for all containers are collected within the same time window. + Containers []ContainerMetrics `json:"containers" protobuf:"bytes,4,rep,name=containers"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodMetricsList is a list of PodMetrics. +type PodMetricsList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of pod metrics. + Items []PodMetrics `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ContainerMetrics sets resource usage metrics of a container. +type ContainerMetrics struct { + // Container name corresponding to the one from pod.spec.containers. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // The memory usage is the memory working set. + Usage v1.ResourceList `json:"usage" protobuf:"bytes,2,rep,name=usage,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` +} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.conversion.go new file mode 100644 index 0000000000..112c4c707d --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.conversion.go @@ -0,0 +1,209 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1beta1 + +import ( + unsafe "unsafe" + + v1 "k8s.io/api/core/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + metrics "k8s.io/metrics/pkg/apis/metrics" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*ContainerMetrics)(nil), (*metrics.ContainerMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ContainerMetrics_To_metrics_ContainerMetrics(a.(*ContainerMetrics), b.(*metrics.ContainerMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*metrics.ContainerMetrics)(nil), (*ContainerMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_metrics_ContainerMetrics_To_v1beta1_ContainerMetrics(a.(*metrics.ContainerMetrics), b.(*ContainerMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NodeMetrics)(nil), (*metrics.NodeMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_NodeMetrics_To_metrics_NodeMetrics(a.(*NodeMetrics), b.(*metrics.NodeMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*metrics.NodeMetrics)(nil), (*NodeMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_metrics_NodeMetrics_To_v1beta1_NodeMetrics(a.(*metrics.NodeMetrics), b.(*NodeMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NodeMetricsList)(nil), (*metrics.NodeMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_NodeMetricsList_To_metrics_NodeMetricsList(a.(*NodeMetricsList), b.(*metrics.NodeMetricsList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*metrics.NodeMetricsList)(nil), (*NodeMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_metrics_NodeMetricsList_To_v1beta1_NodeMetricsList(a.(*metrics.NodeMetricsList), b.(*NodeMetricsList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*PodMetrics)(nil), (*metrics.PodMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_PodMetrics_To_metrics_PodMetrics(a.(*PodMetrics), b.(*metrics.PodMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*metrics.PodMetrics)(nil), (*PodMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_metrics_PodMetrics_To_v1beta1_PodMetrics(a.(*metrics.PodMetrics), b.(*PodMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*PodMetricsList)(nil), (*metrics.PodMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_PodMetricsList_To_metrics_PodMetricsList(a.(*PodMetricsList), b.(*metrics.PodMetricsList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*metrics.PodMetricsList)(nil), (*PodMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_metrics_PodMetricsList_To_v1beta1_PodMetricsList(a.(*metrics.PodMetricsList), b.(*PodMetricsList), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta1_ContainerMetrics_To_metrics_ContainerMetrics(in *ContainerMetrics, out *metrics.ContainerMetrics, s conversion.Scope) error { + out.Name = in.Name + out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) + return nil +} + +// Convert_v1beta1_ContainerMetrics_To_metrics_ContainerMetrics is an autogenerated conversion function. 
+func Convert_v1beta1_ContainerMetrics_To_metrics_ContainerMetrics(in *ContainerMetrics, out *metrics.ContainerMetrics, s conversion.Scope) error { + return autoConvert_v1beta1_ContainerMetrics_To_metrics_ContainerMetrics(in, out, s) +} + +func autoConvert_metrics_ContainerMetrics_To_v1beta1_ContainerMetrics(in *metrics.ContainerMetrics, out *ContainerMetrics, s conversion.Scope) error { + out.Name = in.Name + out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) + return nil +} + +// Convert_metrics_ContainerMetrics_To_v1beta1_ContainerMetrics is an autogenerated conversion function. +func Convert_metrics_ContainerMetrics_To_v1beta1_ContainerMetrics(in *metrics.ContainerMetrics, out *ContainerMetrics, s conversion.Scope) error { + return autoConvert_metrics_ContainerMetrics_To_v1beta1_ContainerMetrics(in, out, s) +} + +func autoConvert_v1beta1_NodeMetrics_To_metrics_NodeMetrics(in *NodeMetrics, out *metrics.NodeMetrics, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Timestamp = in.Timestamp + out.Window = in.Window + out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) + return nil +} + +// Convert_v1beta1_NodeMetrics_To_metrics_NodeMetrics is an autogenerated conversion function. +func Convert_v1beta1_NodeMetrics_To_metrics_NodeMetrics(in *NodeMetrics, out *metrics.NodeMetrics, s conversion.Scope) error { + return autoConvert_v1beta1_NodeMetrics_To_metrics_NodeMetrics(in, out, s) +} + +func autoConvert_metrics_NodeMetrics_To_v1beta1_NodeMetrics(in *metrics.NodeMetrics, out *NodeMetrics, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Timestamp = in.Timestamp + out.Window = in.Window + out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) + return nil +} + +// Convert_metrics_NodeMetrics_To_v1beta1_NodeMetrics is an autogenerated conversion function. +func Convert_metrics_NodeMetrics_To_v1beta1_NodeMetrics(in *metrics.NodeMetrics, out *NodeMetrics, s conversion.Scope) error { + return autoConvert_metrics_NodeMetrics_To_v1beta1_NodeMetrics(in, out, s) +} + +func autoConvert_v1beta1_NodeMetricsList_To_metrics_NodeMetricsList(in *NodeMetricsList, out *metrics.NodeMetricsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]metrics.NodeMetrics)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1beta1_NodeMetricsList_To_metrics_NodeMetricsList is an autogenerated conversion function. +func Convert_v1beta1_NodeMetricsList_To_metrics_NodeMetricsList(in *NodeMetricsList, out *metrics.NodeMetricsList, s conversion.Scope) error { + return autoConvert_v1beta1_NodeMetricsList_To_metrics_NodeMetricsList(in, out, s) +} + +func autoConvert_metrics_NodeMetricsList_To_v1beta1_NodeMetricsList(in *metrics.NodeMetricsList, out *NodeMetricsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NodeMetrics)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_metrics_NodeMetricsList_To_v1beta1_NodeMetricsList is an autogenerated conversion function. 
+func Convert_metrics_NodeMetricsList_To_v1beta1_NodeMetricsList(in *metrics.NodeMetricsList, out *NodeMetricsList, s conversion.Scope) error { + return autoConvert_metrics_NodeMetricsList_To_v1beta1_NodeMetricsList(in, out, s) +} + +func autoConvert_v1beta1_PodMetrics_To_metrics_PodMetrics(in *PodMetrics, out *metrics.PodMetrics, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Timestamp = in.Timestamp + out.Window = in.Window + out.Containers = *(*[]metrics.ContainerMetrics)(unsafe.Pointer(&in.Containers)) + return nil +} + +// Convert_v1beta1_PodMetrics_To_metrics_PodMetrics is an autogenerated conversion function. +func Convert_v1beta1_PodMetrics_To_metrics_PodMetrics(in *PodMetrics, out *metrics.PodMetrics, s conversion.Scope) error { + return autoConvert_v1beta1_PodMetrics_To_metrics_PodMetrics(in, out, s) +} + +func autoConvert_metrics_PodMetrics_To_v1beta1_PodMetrics(in *metrics.PodMetrics, out *PodMetrics, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Timestamp = in.Timestamp + out.Window = in.Window + out.Containers = *(*[]ContainerMetrics)(unsafe.Pointer(&in.Containers)) + return nil +} + +// Convert_metrics_PodMetrics_To_v1beta1_PodMetrics is an autogenerated conversion function. +func Convert_metrics_PodMetrics_To_v1beta1_PodMetrics(in *metrics.PodMetrics, out *PodMetrics, s conversion.Scope) error { + return autoConvert_metrics_PodMetrics_To_v1beta1_PodMetrics(in, out, s) +} + +func autoConvert_v1beta1_PodMetricsList_To_metrics_PodMetricsList(in *PodMetricsList, out *metrics.PodMetricsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]metrics.PodMetrics)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1beta1_PodMetricsList_To_metrics_PodMetricsList is an autogenerated conversion function. +func Convert_v1beta1_PodMetricsList_To_metrics_PodMetricsList(in *PodMetricsList, out *metrics.PodMetricsList, s conversion.Scope) error { + return autoConvert_v1beta1_PodMetricsList_To_metrics_PodMetricsList(in, out, s) +} + +func autoConvert_metrics_PodMetricsList_To_v1beta1_PodMetricsList(in *metrics.PodMetricsList, out *PodMetricsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]PodMetrics)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_metrics_PodMetricsList_To_v1beta1_PodMetricsList is an autogenerated conversion function. +func Convert_metrics_PodMetricsList_To_v1beta1_PodMetricsList(in *metrics.PodMetricsList, out *PodMetricsList, s conversion.Scope) error { + return autoConvert_metrics_PodMetricsList_To_v1beta1_PodMetricsList(in, out, s) +} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..f043d4642f --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,186 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerMetrics) DeepCopyInto(out *ContainerMetrics) { + *out = *in + if in.Usage != nil { + in, out := &in.Usage, &out.Usage + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerMetrics. +func (in *ContainerMetrics) DeepCopy() *ContainerMetrics { + if in == nil { + return nil + } + out := new(ContainerMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeMetrics) DeepCopyInto(out *NodeMetrics) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Timestamp.DeepCopyInto(&out.Timestamp) + out.Window = in.Window + if in.Usage != nil { + in, out := &in.Usage, &out.Usage + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeMetrics. +func (in *NodeMetrics) DeepCopy() *NodeMetrics { + if in == nil { + return nil + } + out := new(NodeMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeMetrics) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeMetricsList) DeepCopyInto(out *NodeMetricsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NodeMetrics, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeMetricsList. +func (in *NodeMetricsList) DeepCopy() *NodeMetricsList { + if in == nil { + return nil + } + out := new(NodeMetricsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeMetricsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodMetrics) DeepCopyInto(out *PodMetrics) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Timestamp.DeepCopyInto(&out.Timestamp) + out.Window = in.Window + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]ContainerMetrics, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetrics. 
+func (in *PodMetrics) DeepCopy() *PodMetrics { + if in == nil { + return nil + } + out := new(PodMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodMetrics) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodMetricsList) DeepCopyInto(out *PodMetricsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodMetrics, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetricsList. +func (in *PodMetricsList) DeepCopy() *PodMetricsList { + if in == nil { + return nil + } + out := new(PodMetricsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodMetricsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/zz_generated.deepcopy.go b/vendor/k8s.io/metrics/pkg/apis/metrics/zz_generated.deepcopy.go new file mode 100644 index 0000000000..c063c9b28a --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/zz_generated.deepcopy.go @@ -0,0 +1,186 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package metrics + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerMetrics) DeepCopyInto(out *ContainerMetrics) { + *out = *in + if in.Usage != nil { + in, out := &in.Usage, &out.Usage + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerMetrics. +func (in *ContainerMetrics) DeepCopy() *ContainerMetrics { + if in == nil { + return nil + } + out := new(ContainerMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeMetrics) DeepCopyInto(out *NodeMetrics) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Timestamp.DeepCopyInto(&out.Timestamp) + out.Window = in.Window + if in.Usage != nil { + in, out := &in.Usage, &out.Usage + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeMetrics. +func (in *NodeMetrics) DeepCopy() *NodeMetrics { + if in == nil { + return nil + } + out := new(NodeMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeMetrics) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeMetricsList) DeepCopyInto(out *NodeMetricsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NodeMetrics, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeMetricsList. +func (in *NodeMetricsList) DeepCopy() *NodeMetricsList { + if in == nil { + return nil + } + out := new(NodeMetricsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeMetricsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodMetrics) DeepCopyInto(out *PodMetrics) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Timestamp.DeepCopyInto(&out.Timestamp) + out.Window = in.Window + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]ContainerMetrics, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetrics. +func (in *PodMetrics) DeepCopy() *PodMetrics { + if in == nil { + return nil + } + out := new(PodMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodMetrics) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodMetricsList) DeepCopyInto(out *PodMetricsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodMetrics, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetricsList. 
+func (in *PodMetricsList) DeepCopy() *PodMetricsList { + if in == nil { + return nil + } + out := new(PodMetricsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodMetricsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/clientset.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/clientset.go new file mode 100644 index 0000000000..3141a8b450 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/clientset.go @@ -0,0 +1,130 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + "fmt" + "net/http" + + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" + metricsv1alpha1 "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1" + metricsv1beta1 "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + MetricsV1alpha1() metricsv1alpha1.MetricsV1alpha1Interface + MetricsV1beta1() metricsv1beta1.MetricsV1beta1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + metricsV1alpha1 *metricsv1alpha1.MetricsV1alpha1Client + metricsV1beta1 *metricsv1beta1.MetricsV1beta1Client +} + +// MetricsV1alpha1 retrieves the MetricsV1alpha1Client +func (c *Clientset) MetricsV1alpha1() metricsv1alpha1.MetricsV1alpha1Interface { + return c.metricsV1alpha1 +} + +// MetricsV1beta1 retrieves the MetricsV1beta1Client +func (c *Clientset) MetricsV1beta1() metricsv1beta1.MetricsV1beta1Interface { + return c.metricsV1beta1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.metricsV1alpha1, err = metricsv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + cs.metricsV1beta1, err = metricsv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.metricsV1alpha1 = metricsv1alpha1.New(c) + cs.metricsV1beta1 = metricsv1beta1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/doc.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/doc.go new file mode 100644 index 0000000000..41721ca52d --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package versioned diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/scheme/doc.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000..7dc3756168 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. 
DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/scheme/register.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000..361fe99e15 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/scheme/register.go @@ -0,0 +1,58 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + metricsv1alpha1 "k8s.io/metrics/pkg/apis/metrics/v1alpha1" + metricsv1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + metricsv1alpha1.AddToScheme, + metricsv1beta1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/doc.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/doc.go new file mode 100644 index 0000000000..df51baa4d4 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
+package v1alpha1 diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/generated_expansion.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/generated_expansion.go new file mode 100644 index 0000000000..e8fc33bbb1 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type NodeMetricsExpansion interface{} + +type PodMetricsExpansion interface{} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/metrics_client.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/metrics_client.go new file mode 100644 index 0000000000..efc23042d4 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/metrics_client.go @@ -0,0 +1,112 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "net/http" + + rest "k8s.io/client-go/rest" + v1alpha1 "k8s.io/metrics/pkg/apis/metrics/v1alpha1" + "k8s.io/metrics/pkg/client/clientset/versioned/scheme" +) + +type MetricsV1alpha1Interface interface { + RESTClient() rest.Interface + NodeMetricsesGetter + PodMetricsesGetter +} + +// MetricsV1alpha1Client is used to interact with features provided by the metrics.k8s.io group. +type MetricsV1alpha1Client struct { + restClient rest.Interface +} + +func (c *MetricsV1alpha1Client) NodeMetricses() NodeMetricsInterface { + return newNodeMetricses(c) +} + +func (c *MetricsV1alpha1Client) PodMetricses(namespace string) PodMetricsInterface { + return newPodMetricses(c, namespace) +} + +// NewForConfig creates a new MetricsV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*MetricsV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new MetricsV1alpha1Client for the given config and http client. 
+// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*MetricsV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &MetricsV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new MetricsV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *MetricsV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new MetricsV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *MetricsV1alpha1Client { + return &MetricsV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *MetricsV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/nodemetrics.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/nodemetrics.go new file mode 100644 index 0000000000..d79163ddb8 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/nodemetrics.go @@ -0,0 +1,98 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1alpha1 "k8s.io/metrics/pkg/apis/metrics/v1alpha1" + scheme "k8s.io/metrics/pkg/client/clientset/versioned/scheme" +) + +// NodeMetricsesGetter has a method to return a NodeMetricsInterface. +// A group's client should implement this interface. +type NodeMetricsesGetter interface { + NodeMetricses() NodeMetricsInterface +} + +// NodeMetricsInterface has methods to work with NodeMetrics resources. 
+type NodeMetricsInterface interface { + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.NodeMetrics, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.NodeMetricsList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + NodeMetricsExpansion +} + +// nodeMetricses implements NodeMetricsInterface +type nodeMetricses struct { + client rest.Interface +} + +// newNodeMetricses returns a NodeMetricses +func newNodeMetricses(c *MetricsV1alpha1Client) *nodeMetricses { + return &nodeMetricses{ + client: c.RESTClient(), + } +} + +// Get takes name of the nodeMetrics, and returns the corresponding nodeMetrics object, and an error if there is any. +func (c *nodeMetricses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.NodeMetrics, err error) { + result = &v1alpha1.NodeMetrics{} + err = c.client.Get(). + Resource("nodes"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of NodeMetricses that match those selectors. +func (c *nodeMetricses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.NodeMetricsList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.NodeMetricsList{} + err = c.client.Get(). + Resource("nodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested nodeMetricses. +func (c *nodeMetricses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("nodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/podmetrics.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/podmetrics.go new file mode 100644 index 0000000000..49d57c8e88 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/podmetrics.go @@ -0,0 +1,103 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1alpha1 "k8s.io/metrics/pkg/apis/metrics/v1alpha1" + scheme "k8s.io/metrics/pkg/client/clientset/versioned/scheme" +) + +// PodMetricsesGetter has a method to return a PodMetricsInterface. +// A group's client should implement this interface. 
+type PodMetricsesGetter interface { + PodMetricses(namespace string) PodMetricsInterface +} + +// PodMetricsInterface has methods to work with PodMetrics resources. +type PodMetricsInterface interface { + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PodMetrics, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PodMetricsList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + PodMetricsExpansion +} + +// podMetricses implements PodMetricsInterface +type podMetricses struct { + client rest.Interface + ns string +} + +// newPodMetricses returns a PodMetricses +func newPodMetricses(c *MetricsV1alpha1Client, namespace string) *podMetricses { + return &podMetricses{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the podMetrics, and returns the corresponding podMetrics object, and an error if there is any. +func (c *podMetricses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PodMetrics, err error) { + result = &v1alpha1.PodMetrics{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodMetricses that match those selectors. +func (c *podMetricses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PodMetricsList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.PodMetricsList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podMetricses. +func (c *podMetricses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/doc.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/doc.go new file mode 100644 index 0000000000..771101956f --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
+package v1beta1 diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/generated_expansion.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/generated_expansion.go new file mode 100644 index 0000000000..a89ca3c780 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +type NodeMetricsExpansion interface{} + +type PodMetricsExpansion interface{} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/metrics_client.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/metrics_client.go new file mode 100644 index 0000000000..7a02cea2e5 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/metrics_client.go @@ -0,0 +1,112 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "net/http" + + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" + "k8s.io/metrics/pkg/client/clientset/versioned/scheme" +) + +type MetricsV1beta1Interface interface { + RESTClient() rest.Interface + NodeMetricsesGetter + PodMetricsesGetter +} + +// MetricsV1beta1Client is used to interact with features provided by the metrics.k8s.io group. +type MetricsV1beta1Client struct { + restClient rest.Interface +} + +func (c *MetricsV1beta1Client) NodeMetricses() NodeMetricsInterface { + return newNodeMetricses(c) +} + +func (c *MetricsV1beta1Client) PodMetricses(namespace string) PodMetricsInterface { + return newPodMetricses(c, namespace) +} + +// NewForConfig creates a new MetricsV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*MetricsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new MetricsV1beta1Client for the given config and http client. 
+// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*MetricsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &MetricsV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new MetricsV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *MetricsV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new MetricsV1beta1Client for the given RESTClient. +func New(c rest.Interface) *MetricsV1beta1Client { + return &MetricsV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *MetricsV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/nodemetrics.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/nodemetrics.go new file mode 100644 index 0000000000..a312221ed2 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/nodemetrics.go @@ -0,0 +1,98 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" + scheme "k8s.io/metrics/pkg/client/clientset/versioned/scheme" +) + +// NodeMetricsesGetter has a method to return a NodeMetricsInterface. +// A group's client should implement this interface. +type NodeMetricsesGetter interface { + NodeMetricses() NodeMetricsInterface +} + +// NodeMetricsInterface has methods to work with NodeMetrics resources. 
+type NodeMetricsInterface interface { + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.NodeMetrics, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.NodeMetricsList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + NodeMetricsExpansion +} + +// nodeMetricses implements NodeMetricsInterface +type nodeMetricses struct { + client rest.Interface +} + +// newNodeMetricses returns a NodeMetricses +func newNodeMetricses(c *MetricsV1beta1Client) *nodeMetricses { + return &nodeMetricses{ + client: c.RESTClient(), + } +} + +// Get takes name of the nodeMetrics, and returns the corresponding nodeMetrics object, and an error if there is any. +func (c *nodeMetricses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.NodeMetrics, err error) { + result = &v1beta1.NodeMetrics{} + err = c.client.Get(). + Resource("nodes"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of NodeMetricses that match those selectors. +func (c *nodeMetricses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.NodeMetricsList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.NodeMetricsList{} + err = c.client.Get(). + Resource("nodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested nodeMetricses. +func (c *nodeMetricses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("nodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/podmetrics.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/podmetrics.go new file mode 100644 index 0000000000..e66c377c25 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/podmetrics.go @@ -0,0 +1,103 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" + scheme "k8s.io/metrics/pkg/client/clientset/versioned/scheme" +) + +// PodMetricsesGetter has a method to return a PodMetricsInterface. +// A group's client should implement this interface. 
+type PodMetricsesGetter interface { + PodMetricses(namespace string) PodMetricsInterface +} + +// PodMetricsInterface has methods to work with PodMetrics resources. +type PodMetricsInterface interface { + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodMetrics, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodMetricsList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + PodMetricsExpansion +} + +// podMetricses implements PodMetricsInterface +type podMetricses struct { + client rest.Interface + ns string +} + +// newPodMetricses returns a PodMetricses +func newPodMetricses(c *MetricsV1beta1Client, namespace string) *podMetricses { + return &podMetricses{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the podMetrics, and returns the corresponding podMetrics object, and an error if there is any. +func (c *podMetricses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodMetrics, err error) { + result = &v1beta1.PodMetrics{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodMetricses that match those selectors. +func (c *podMetricses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodMetricsList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.PodMetricsList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podMetricses. +func (c *podMetricses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
+ Watch(ctx) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index a4ff62e407..ffb4439eda 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1728,6 +1728,15 @@ k8s.io/kubernetes/pkg/apis/storage/v1alpha1 k8s.io/kubernetes/pkg/apis/storage/v1beta1 k8s.io/kubernetes/pkg/features k8s.io/kubernetes/pkg/util/parsers +# k8s.io/metrics v0.23.1 => k8s.io/metrics v0.23.1 +## explicit; go 1.16 +k8s.io/metrics/pkg/apis/metrics +k8s.io/metrics/pkg/apis/metrics/v1alpha1 +k8s.io/metrics/pkg/apis/metrics/v1beta1 +k8s.io/metrics/pkg/client/clientset/versioned +k8s.io/metrics/pkg/client/clientset/versioned/scheme +k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1 +k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1 # k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 ## explicit; go 1.12 k8s.io/utils/buffer diff --git a/wire_gen.go b/wire_gen.go index b722928ed1..1a2acd1c96 100644 --- a/wire_gen.go +++ b/wire_gen.go @@ -598,7 +598,14 @@ func InitializeApp() (*App, error) { apiTokenRestHandlerImpl := apiToken2.NewApiTokenRestHandlerImpl(sugaredLogger, apiTokenServiceImpl, userServiceImpl, enforcerImpl, validate) apiTokenRouterImpl := apiToken2.NewApiTokenRouterImpl(apiTokenRestHandlerImpl) helmApplicationStatusUpdateHandlerImpl := cron.NewHelmApplicationStatusUpdateHandlerImpl(sugaredLogger, appServiceImpl, workflowDagExecutorImpl, installedAppServiceImpl, cdHandlerImpl) - muxRouter := router.NewMuxRouter(sugaredLogger, helmRouterImpl, pipelineConfigRouterImpl, migrateDbRouterImpl, appListingRouterImpl, environmentRouterImpl, clusterRouterImpl, webhookRouterImpl, userAuthRouterImpl, applicationRouterImpl, cdRouterImpl, projectManagementRouterImpl, gitProviderRouterImpl, gitHostRouterImpl, dockerRegRouterImpl, notificationRouterImpl, teamRouterImpl, gitWebhookHandlerImpl, workflowStatusUpdateHandlerImpl, applicationStatusUpdateHandlerImpl, ciEventHandlerImpl, pubSubClient, userRouterImpl, cronBasedEventReceiverImpl, chartRefRouterImpl, configMapRouterImpl, appStoreRouterImpl, chartRepositoryRouterImpl, releaseMetricsRouterImpl, deploymentGroupRouterImpl, batchOperationRouterImpl, chartGroupRouterImpl, testSuitRouterImpl, imageScanRouterImpl, policyRouterImpl, gitOpsConfigRouterImpl, dashboardRouterImpl, attributesRouterImpl, commonRouterImpl, grafanaRouterImpl, ssoLoginRouterImpl, telemetryRouterImpl, telemetryEventClientImplExtended, bulkUpdateRouterImpl, webhookListenerRouterImpl, appLabelRouterImpl, coreAppRouterImpl, helmAppRouterImpl, k8sApplicationRouterImpl, pProfRouterImpl, deploymentConfigRouterImpl, dashboardTelemetryRouterImpl, commonDeploymentRouterImpl, externalLinkRouterImpl, globalPluginRouterImpl, selfRegistrationRolesRouterImpl, moduleRouterImpl, serverRouterImpl, apiTokenRouterImpl, helmApplicationStatusUpdateHandlerImpl) + clusterCronServiceImpl, err := k8s.NewClusterCronServiceImpl(sugaredLogger, clusterServiceImplExtended, k8sApplicationServiceImpl, clusterRepositoryImpl) + if err != nil { + return nil, err + } + k8sCapacityServiceImpl := k8s.NewK8sCapacityServiceImpl(sugaredLogger, clusterServiceImplExtended, k8sApplicationServiceImpl, k8sClientServiceImpl, clusterCronServiceImpl) + k8sCapacityRestHandlerImpl := k8s.NewK8sCapacityRestHandlerImpl(sugaredLogger, k8sCapacityServiceImpl, userServiceImpl, enforcerImpl, clusterServiceImplExtended, environmentServiceImpl) + k8sCapacityRouterImpl := k8s.NewK8sCapacityRouterImpl(k8sCapacityRestHandlerImpl) + muxRouter := router.NewMuxRouter(sugaredLogger, helmRouterImpl, pipelineConfigRouterImpl, 
migrateDbRouterImpl, appListingRouterImpl, environmentRouterImpl, clusterRouterImpl, webhookRouterImpl, userAuthRouterImpl, applicationRouterImpl, cdRouterImpl, projectManagementRouterImpl, gitProviderRouterImpl, gitHostRouterImpl, dockerRegRouterImpl, notificationRouterImpl, teamRouterImpl, gitWebhookHandlerImpl, workflowStatusUpdateHandlerImpl, applicationStatusUpdateHandlerImpl, ciEventHandlerImpl, pubSubClient, userRouterImpl, cronBasedEventReceiverImpl, chartRefRouterImpl, configMapRouterImpl, appStoreRouterImpl, chartRepositoryRouterImpl, releaseMetricsRouterImpl, deploymentGroupRouterImpl, batchOperationRouterImpl, chartGroupRouterImpl, testSuitRouterImpl, imageScanRouterImpl, policyRouterImpl, gitOpsConfigRouterImpl, dashboardRouterImpl, attributesRouterImpl, commonRouterImpl, grafanaRouterImpl, ssoLoginRouterImpl, telemetryRouterImpl, telemetryEventClientImplExtended, bulkUpdateRouterImpl, webhookListenerRouterImpl, appLabelRouterImpl, coreAppRouterImpl, helmAppRouterImpl, k8sApplicationRouterImpl, pProfRouterImpl, deploymentConfigRouterImpl, dashboardTelemetryRouterImpl, commonDeploymentRouterImpl, externalLinkRouterImpl, globalPluginRouterImpl, selfRegistrationRolesRouterImpl, moduleRouterImpl, serverRouterImpl, apiTokenRouterImpl, helmApplicationStatusUpdateHandlerImpl, k8sCapacityRouterImpl) mainApp := NewApp(muxRouter, sugaredLogger, sseSSE, versionServiceImpl, enforcer, db, pubSubClient, sessionManager) return mainApp, nil }
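Note (not part of the patch): a minimal, illustrative sketch of how the vendored metrics.k8s.io clientset added above can be consumed to read node usage, which is the kind of data the new capacity wiring (k8sCapacityServiceImpl and friends) builds on. The kubeconfig path and the printed fields are assumptions made only for this example; a real caller would construct its rest.Config from whatever cluster credentials it manages. If metrics-server is not installed in the target cluster, the List call returns an error that callers need to tolerate rather than treat as fatal.

// Illustrative usage sketch, assuming a reachable cluster via the default kubeconfig.
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
	metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
)

func main() {
	// Build a rest.Config from the default kubeconfig location (example assumption;
	// production code would typically build this from stored cluster credentials).
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}

	// NewForConfig is the generated constructor vendored in this patch
	// (vendor/k8s.io/metrics/pkg/client/clientset/versioned/clientset.go).
	mc, err := metricsclient.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// List node metrics via the metrics.k8s.io/v1beta1 API; this errors out
	// when metrics-server is not deployed in the cluster.
	nodeMetrics, err := mc.MetricsV1beta1().NodeMetricses().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, nm := range nodeMetrics.Items {
		cpu := nm.Usage[corev1.ResourceCPU]
		mem := nm.Usage[corev1.ResourceMemory]
		fmt.Printf("node=%s cpu=%s memory=%s window=%s\n", nm.Name, cpu.String(), mem.String(), nm.Window.Duration)
	}
}

The same clientset exposes PodMetricses(namespace) for per-pod usage; both return plain typed lists, so aggregating requests/limits against usage (as a capacity view does) is ordinary map arithmetic over v1.ResourceList values.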